kenken999 commited on
Commit
afef542
2 Parent(s): eefb42f 14dc68f
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. README.md +6 -0
  2. babyagi/.gitattributes +1 -0
  3. babyagi/.gitignore +24 -0
  4. babyagi/.pre-commit-config.yaml +28 -0
  5. babyagi/CNAME +1 -0
  6. babyagi/Dockerfile +13 -0
  7. babyagi/LICENSE +21 -0
  8. babyagi/README.md +130 -0
  9. babyagi/_config.yml +14 -0
  10. babyagi/babyagi.py +643 -0
  11. babyagi/babycoder/README.md +40 -0
  12. babyagi/babycoder/babycoder.py +610 -0
  13. babyagi/babycoder/embeddings.py +207 -0
  14. babyagi/babycoder/objective.sample.txt +6 -0
  15. babyagi/classic/ABOUT.md +3 -0
  16. babyagi/classic/BabyBeeAGI.py +300 -0
  17. babyagi/classic/BabyCatAGI.py +320 -0
  18. babyagi/classic/BabyDeerAGI.py +354 -0
  19. babyagi/classic/BabyElfAGI/main.py +118 -0
  20. babyagi/classic/BabyElfAGI/skills/code_reader.py +79 -0
  21. babyagi/classic/BabyElfAGI/skills/directory_structure.py +56 -0
  22. babyagi/classic/BabyElfAGI/skills/objective_saver.py +43 -0
  23. babyagi/classic/BabyElfAGI/skills/skill.py +33 -0
  24. babyagi/classic/BabyElfAGI/skills/skill_registry.py +57 -0
  25. babyagi/classic/BabyElfAGI/skills/skill_saver.py +58 -0
  26. babyagi/classic/BabyElfAGI/skills/text_completion.py +31 -0
  27. babyagi/classic/BabyElfAGI/skills/web_search.py +151 -0
  28. babyagi/classic/BabyElfAGI/tasks/example_objectives/example1.json +26 -0
  29. babyagi/classic/BabyElfAGI/tasks/example_objectives/example2.json +33 -0
  30. babyagi/classic/BabyElfAGI/tasks/example_objectives/example3.json +40 -0
  31. babyagi/classic/BabyElfAGI/tasks/example_objectives/example4.json +54 -0
  32. babyagi/classic/BabyElfAGI/tasks/example_objectives/example5.json +26 -0
  33. babyagi/classic/BabyElfAGI/tasks/example_objectives/example6.json +26 -0
  34. babyagi/classic/BabyElfAGI/tasks/task_registry.py +269 -0
  35. babyagi/classic/README.md +45 -0
  36. babyagi/classic/babyagi.py +138 -0
  37. babyagi/classic/babyfoxagi/README.md +140 -0
  38. babyagi/classic/babyfoxagi/babyagi.py +140 -0
  39. babyagi/classic/babyfoxagi/forever_cache.ndjson +1 -0
  40. babyagi/classic/babyfoxagi/main.py +309 -0
  41. babyagi/classic/babyfoxagi/ongoing_tasks.py +1 -0
  42. babyagi/classic/babyfoxagi/overall_summary.ndjson +0 -0
  43. babyagi/classic/babyfoxagi/poetry.lock +886 -0
  44. babyagi/classic/babyfoxagi/public/static/chat.js +279 -0
  45. babyagi/classic/babyfoxagi/public/static/style.css +97 -0
  46. babyagi/classic/babyfoxagi/pyproject.toml +28 -0
  47. babyagi/classic/babyfoxagi/skills/__init__.py +0 -0
  48. babyagi/classic/babyfoxagi/skills/airtable_search.py +123 -0
  49. babyagi/classic/babyfoxagi/skills/call_babyagi.py +41 -0
  50. babyagi/classic/babyfoxagi/skills/code_reader.py +79 -0
README.md CHANGED
@@ -10,3 +10,9 @@ pinned: false
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
13
+ for gpt-engineer
14
+ export OPENAI_API_BASE="https://api.groq.com/openai/v1"
15
+ export OPENAI_API_KEY="*************"
16
+ export MODEL_NAME="llama3-8b-8192"
17
+ export LOCAL_MODEL=true
18
+ for interpreter
babyagi/.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ *.py text eol=lf
babyagi/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__/
2
+ *.py[cod]
3
+ *$py.class
4
+
5
+ .env
6
+ .env.*
7
+ env/
8
+ .venv
9
+ *venv/
10
+
11
+ .vscode/
12
+ .idea/
13
+
14
+ models
15
+ llama/
16
+
17
+ babycoder/playground/*
18
+ babycoder/playground_data/*
19
+ babycoder/objective.txt
20
+
21
+ # for node
22
+ chroma/
23
+ node_modules/
24
+ .DS_Store
babyagi/.pre-commit-config.yaml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ repos:
2
+ - repo: https://github.com/pre-commit/pre-commit-hooks
3
+ rev: v4.4.0
4
+ hooks:
5
+ - id: trailing-whitespace
6
+ - id: end-of-file-fixer
7
+ - id: check-yaml
8
+ - id: check-added-large-files
9
+ - id: check-merge-conflict
10
+ - id: debug-statements
11
+ - id: requirements-txt-fixer
12
+ files: requirements.txt
13
+
14
+ - repo: https://github.com/psf/black
15
+ rev: 23.3.0
16
+ hooks:
17
+ - id: black
18
+
19
+ - repo: https://github.com/pycqa/isort
20
+ rev: 5.11.5
21
+ hooks:
22
+ - id: isort
23
+
24
+ - repo: https://github.com/pycqa/flake8
25
+ rev: 6.0.0
26
+ hooks:
27
+ - id: flake8
28
+ args: ["--max-line-length=140"]
babyagi/CNAME ADDED
@@ -0,0 +1 @@
 
 
1
+ babyagi.org
babyagi/Dockerfile ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ FROM python:3.11-slim
2
+
3
+ ENV PIP_NO_CACHE_DIR=true
4
+ WORKDIR /tmp
5
+ RUN apt-get update && apt-get install build-essential -y
6
+
7
+ COPY requirements.txt /tmp/requirements.txt
8
+ RUN pip install -r requirements.txt
9
+
10
+ WORKDIR /app
11
+ COPY . /app
12
+ ENTRYPOINT ["./babyagi.py"]
13
+ EXPOSE 8080
babyagi/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2023 Yohei Nakajima
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
babyagi/README.md ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Translations:
2
+
3
+ [<img title="عربي" alt="عربي" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/sa.svg" width="30">](docs/README-ar.md)
4
+ [<img title="Français" alt="Français" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/fr.svg" width="30">](docs/README-fr.md)
5
+ [<img title="Polski" alt="Polski" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/pl.svg" width="30">](docs/README-pl.md)
6
+ [<img title="Portuguese" alt="Portuguese" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/br.svg" width="30">](docs/README-pt-br.md)
7
+ [<img title="Romanian" alt="Romanian" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/ro.svg" width="30">](docs/README-ro.md)
8
+ [<img title="Russian" alt="Russian" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/ru.svg" width="30">](docs/README-ru.md)
9
+ [<img title="Slovenian" alt="Slovenian" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/si.svg" width="30">](docs/README-si.md)
10
+ [<img title="Spanish" alt="Spanish" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/es.svg" width="30">](docs/README-es.md)
11
+ [<img title="Turkish" alt="Turkish" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/tr.svg" width="30">](docs/README-tr.md)
12
+ [<img title="Ukrainian" alt="Ukrainian" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/ua.svg" width="30">](docs/README-ua.md)
13
+ [<img title="简体中文" alt="Simplified Chinese" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/cn.svg" width="30">](docs/README-cn.md)
14
+ [<img title="繁體中文 (Traditional Chinese)" alt="繁體中文 (Traditional Chinese)" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/tw.svg" width="30">](docs/README-zh-tw.md)
15
+ [<img title="日本語" alt="日本語" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/jp.svg" width="30">](docs/README-ja.md)
16
+ [<img title="한국어" alt="한국어" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/kr.svg" width="30">](docs/README-ko.md)
17
+ [<img title="Magyar" alt="Magyar" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/hu.svg" width="30">](docs/README-hu.md)
18
+ [<img title="فارسی" alt="فارسی" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/ir.svg" width="30">](docs/README-fa.md)
19
+ [<img title="German" alt="German" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/de.svg" width="30">](docs/README-de.md)
20
+ [<img title="Indian" alt="Indian" src="https://cdn.staticaly.com/gh/hjnilsson/country-flags/master/svg/in.svg" width="30">](docs/README-in.md)
21
+ [<img title="Vietnamese" alt="Vietnamese" src="https://raw.githubusercontent.com/hampusborgos/country-flags/main/svg/vn.svg" width="30">](docs/README-vn.md)
22
+
23
+
24
+ # Objective
25
+
26
+ This Python script is an example of an AI-powered task management system. The system uses OpenAI and vector databases such as Chroma or Weaviate to create, prioritize, and execute tasks. The main idea behind this system is that it creates tasks based on the result of previous tasks and a predefined objective. The script then uses OpenAI's natural language processing (NLP) capabilities to create new tasks based on the objective, and Chroma/Weaviate to store and retrieve task results for context. This is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023).
27
+
28
+ This README will cover the following:
29
+
30
+ - [How the script works](#how-it-works)
31
+
32
+ - [How to use the script](#how-to-use)
33
+
34
+ - [Supported Models](#supported-models)
35
+
36
+ - [Warning about running the script continuously](#continuous-script-warning)
37
+
38
+ # How It Works<a name="how-it-works"></a>
39
+
40
+ The script works by running an infinite loop that does the following steps:
41
+
42
+ 1. Pulls the first task from the task list.
43
+ 2. Sends the task to the execution agent, which uses OpenAI's API to complete the task based on the context.
44
+ 3. Enriches the result and stores it in [Chroma](https://docs.trychroma.com)/[Weaviate](https://weaviate.io/).
45
+ 4. Creates new tasks and reprioritizes the task list based on the objective and the result of the previous task.
46
+ </br>
47
+
48
+ ![image](https://user-images.githubusercontent.com/21254008/235015461-543a897f-70cc-4b63-941a-2ae3c9172b11.png)
49
+
50
+ The `execution_agent()` function is where the OpenAI API is used. It takes two parameters: the objective and the task. It then sends a prompt to OpenAI's API, which returns the result of the task. The prompt consists of a description of the AI system's task, the objective, and the task itself. The result is then returned as a string.
51
+
52
+ The `task_creation_agent()` function is where OpenAI's API is used to create new tasks based on the objective and the result of the previous task. The function takes four parameters: the objective, the result of the previous task, the task description, and the current task list. It then sends a prompt to OpenAI's API, which returns a list of new tasks as strings. The function then returns the new tasks as a list of dictionaries, where each dictionary contains the name of the task.
53
+
54
+ The `prioritization_agent()` function is where OpenAI's API is used to reprioritize the task list. The function takes one parameter, the ID of the current task. It sends a prompt to OpenAI's API, which returns the reprioritized task list as a numbered list.
55
+
56
+ Finally, the script uses Chroma/Weaviate to store and retrieve task results for context. The script creates a Chroma/Weaviate collection based on the table name specified in the TABLE_NAME variable. Chroma/Weaviate is then used to store the results of the task in the collection, along with the task name and any additional metadata.
57
+
58
+ # How to Use<a name="how-to-use"></a>
59
+
60
+ To use the script, you will need to follow these steps:
61
+
62
+ 1. Clone the repository via `git clone https://github.com/yoheinakajima/babyagi.git` and `cd` into the cloned repository.
63
+ 2. Install the required packages: `pip install -r requirements.txt`
64
+ 3. Copy the .env.example file to .env: `cp .env.example .env`. This is where you will set the following variables.
65
+ 4. Set your OpenAI API key in the OPENAI_API_KEY and OPENAI_API_MODEL variables. In order to use with Weaviate you will also need to setup additional variables detailed [here](docs/weaviate.md).
66
+ 5. Set the name of the table where the task results will be stored in the TABLE_NAME variable.
67
+ 6. (Optional) Set the name of the BabyAGI instance in the BABY_NAME variable.
68
+ 7. (Optional) Set the objective of the task management system in the OBJECTIVE variable.
69
+ 8. (Optional) Set the first task of the system in the INITIAL_TASK variable.
70
+ 9. Run the script: `python babyagi.py`
71
+
72
+ All optional values above can also be specified on the command line.
73
+
74
+ # Running inside a docker container
75
+
76
+ As a prerequisite, you will need docker and docker-compose installed. Docker desktop is the simplest option https://www.docker.com/products/docker-desktop/
77
+
78
+ To run the system inside a docker container, setup your .env file as per steps above and then run the following:
79
+
80
+ ```
81
+ docker-compose up
82
+ ```
83
+
84
+ # Supported Models<a name="supported-models"></a>
85
+
86
+ This script works with all OpenAI models, as well as Llama and its variations through Llama.cpp. Default model is **gpt-3.5-turbo**. To use a different model, specify it through LLM_MODEL or use the command line.
87
+
88
+ ## Llama
89
+
90
+ Llama integration requires llama-cpp package. You will also need the Llama model weights.
91
+
92
+ - **Under no circumstances share IPFS, magnet links, or any other links to model downloads anywhere in this repository, including in issues, discussions or pull requests. They will be immediately deleted.**
93
+
94
+ Once you have them, set LLAMA_MODEL_PATH to the path of the specific model to use. For convenience, you can link `models` in BabyAGI repo to the folder where you have the Llama model weights. Then run the script with `LLM_MODEL=llama` or `-l` argument.
95
+
96
+ # Warning<a name="continuous-script-warning"></a>
97
+
98
+ This script is designed to be run continuously as part of a task management system. Running this script continuously can result in high API usage, so please use it responsibly. Additionally, the script requires the OpenAI API to be set up correctly, so make sure you have set up the API before running the script.
99
+
100
+ # Contribution
101
+
102
+ Needless to say, BabyAGI is still in its infancy and thus we are still determining its direction and the steps to get there. Currently, a key design goal for BabyAGI is to be _simple_ such that it's easy to understand and build upon. To maintain this simplicity, we kindly request that you adhere to the following guidelines when submitting PRs:
103
+
104
+ - Focus on small, modular modifications rather than extensive refactoring.
105
+ - When introducing new features, provide a detailed description of the specific use case you are addressing.
106
+
107
+ A note from @yoheinakajima (Apr 5th, 2023):
108
+
109
+ > I know there are a growing number of PRs, appreciate your patience - as I am both new to GitHub/OpenSource, and did not plan my time availability accordingly this week. Re:direction, I've been torn on keeping it simple vs expanding - currently leaning towards keeping a core Baby AGI simple, and using this as a platform to support and promote different approaches to expanding this (eg. BabyAGIxLangchain as one direction). I believe there are various opinionated approaches that are worth exploring, and I see value in having a central place to compare and discuss. More updates coming shortly.
110
+
111
+ I am new to GitHub and open source, so please be patient as I learn to manage this project properly. I run a VC firm by day, so I will generally be checking PRs and issues at night after I get my kids down - which may not be every night. Open to the idea of bringing in support, will be updating this section soon (expectations, visions, etc). Talking to lots of people and learning - hang tight for updates!
112
+
113
+ # BabyAGI Activity Report
114
+
115
+ To help the BabyAGI community stay informed about the project's progress, Blueprint AI has developed a Github activity summarizer for BabyAGI. This concise report displays a summary of all contributions to the BabyAGI repository over the past 7 days (continuously updated), making it easy for you to keep track of the latest developments.
116
+
117
+ To view the BabyAGI 7-day activity report, go here: [https://app.blueprint.ai/github/yoheinakajima/babyagi](https://app.blueprint.ai/github/yoheinakajima/babyagi)
118
+
119
+ [<img width="293" alt="image" src="https://user-images.githubusercontent.com/334530/235789974-f49d3cbe-f4df-4c3d-89e9-bfb60eea6308.png">](https://app.blueprint.ai/github/yoheinakajima/babyagi)
120
+
121
+
122
+ # Inspired projects
123
+
124
+ In the short time since it was released, BabyAGI inspired many projects. You can see them all [here](docs/inspired-projects.md).
125
+
126
+ # Backstory
127
+
128
+ BabyAGI is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023) shared on Twitter. This version is down to 140 lines: 13 comments, 22 blanks, and 105 code. The name of the repo came up in the reaction to the original autonomous agent - the author does not mean to imply that this is AGI.
129
+
130
+ Made with love by [@yoheinakajima](https://twitter.com/yoheinakajima), who happens to be a VC (would love to see what you're building!)
babyagi/_config.yml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ title: BabyAGI
2
+ email:
3
+ description: >-
4
+ BabyAGI is an AI-powered task management system that uses OpenAI and Pinecone APIs to create, prioritize, and execute tasks.
5
+ baseurl: ""
6
+ url: "https://babyagi.org"
7
+ logo: docs/babyagi.png
8
+ twitter_username: babyagi_
9
+ github_username: yoheinakajima
10
+ show_downloads: false
11
+ remote_theme: pages-themes/minimal@v0.2.0
12
+ include: [docs]
13
+ plugins:
14
+ - jekyll-remote-theme
babyagi/babyagi.py ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""BabyAGI engine bootstrap: environment loading, clients, and run configuration."""
from dotenv import load_dotenv

# Load default environment variables (.env) before any os.getenv() below runs.
load_dotenv()

import os
import time
import logging
from collections import deque
from typing import Dict, List
import importlib
import openai
import chromadb
import tiktoken as tiktoken
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
import re
from groq import Groq

# default opt out of chromadb telemetry.
from chromadb.config import Settings

client = chromadb.Client(Settings(anonymized_telemetry=False))

# Engine configuration

# Model: GPT, LLAMA, HUMAN, etc.
LLM_MODEL = os.getenv("LLM_MODEL", os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")).lower()

# API Keys.
# FIX: the assertion message below refers to OPENAI_API_KEY, but the code only
# read the non-standard "api_key" variable. Prefer OPENAI_API_KEY and keep
# "api_key" as a backward-compatible fallback.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", os.getenv("api_key", ""))
if not (LLM_MODEL.startswith("llama") or LLM_MODEL.startswith("human")):
    assert OPENAI_API_KEY, "\033[91m\033[1m" + "OPENAI_API_KEY environment variable is missing from .env" + "\033[0m\033[0m"

# Table config: name of the vector-store collection that holds task results.
RESULTS_STORE_NAME = os.getenv("RESULTS_STORE_NAME", os.getenv("TABLE_NAME", ""))
assert RESULTS_STORE_NAME, "\033[91m\033[1m" + "RESULTS_STORE_NAME environment variable is missing from .env" + "\033[0m\033[0m"

# Run configuration
INSTANCE_NAME = os.getenv("INSTANCE_NAME", os.getenv("BABY_NAME", "BabyAGI"))
COOPERATIVE_MODE = "none"
JOIN_EXISTING_OBJECTIVE = False

# Goal configuration
OBJECTIVE = os.getenv("OBJECTIVE", "")
INITIAL_TASK = os.getenv("INITIAL_TASK", os.getenv("FIRST_TASK", ""))

# Model configuration
OPENAI_TEMPERATURE = float(os.getenv("OPENAI_TEMPERATURE", 0.0))
51
+
52
+
53
+ # Extensions support begin
54
+
55
def can_import(module_name):
    """Return True when *module_name* imports cleanly, False otherwise."""
    try:
        importlib.import_module(module_name)
    except ImportError:
        return False
    return True
61
+
62
+
63
+ DOTENV_EXTENSIONS = os.getenv("DOTENV_EXTENSIONS", "").split(" ")
64
+
65
+ # Command line arguments extension
66
+ # Can override any of the above environment variables
67
+ ENABLE_COMMAND_LINE_ARGS = (
68
+ os.getenv("ENABLE_COMMAND_LINE_ARGS", "false").lower() == "true"
69
+ )
70
+ if ENABLE_COMMAND_LINE_ARGS:
71
+ if can_import("extensions.argparseext"):
72
+ from extensions.argparseext import parse_arguments
73
+
74
+ OBJECTIVE, INITIAL_TASK, LLM_MODEL, DOTENV_EXTENSIONS, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE = parse_arguments()
75
+
76
+ # Human mode extension
77
+ # Gives human input to babyagi
78
+ if LLM_MODEL.startswith("human"):
79
+ if can_import("extensions.human_mode"):
80
+ from extensions.human_mode import user_input_await
81
+
82
+ # Load additional environment variables for enabled extensions
83
+ # TODO: This might override the following command line arguments as well:
84
+ # OBJECTIVE, INITIAL_TASK, LLM_MODEL, INSTANCE_NAME, COOPERATIVE_MODE, JOIN_EXISTING_OBJECTIVE
85
+ if DOTENV_EXTENSIONS:
86
+ if can_import("extensions.dotenvext"):
87
+ from extensions.dotenvext import load_dotenv_extensions
88
+
89
+ load_dotenv_extensions(DOTENV_EXTENSIONS)
90
+
91
+ # TODO: There's still work to be done here to enable people to get
92
+ # defaults from dotenv extensions, but also provide command line
93
+ # arguments to override them
94
+
95
+ # Extensions support end
96
+
97
+ print("\033[95m\033[1m" + "\n*****CONFIGURATION*****\n" + "\033[0m\033[0m")
98
+ print(f"Name : {INSTANCE_NAME}")
99
+ print(f"Mode : {'alone' if COOPERATIVE_MODE in ['n', 'none'] else 'local' if COOPERATIVE_MODE in ['l', 'local'] else 'distributed' if COOPERATIVE_MODE in ['d', 'distributed'] else 'undefined'}")
100
+ print(f"LLM : {LLM_MODEL}")
101
+
102
+
103
+ # Check if we know what we are doing
104
+ assert OBJECTIVE, "\033[91m\033[1m" + "OBJECTIVE environment variable is missing from .env" + "\033[0m\033[0m"
105
+ assert INITIAL_TASK, "\033[91m\033[1m" + "INITIAL_TASK environment variable is missing from .env" + "\033[0m\033[0m"
106
+
107
+ LLAMA_MODEL_PATH = os.getenv("LLAMA_MODEL_PATH", "models/llama-13B/ggml-model.bin")
108
+ if LLM_MODEL.startswith("llama"):
109
+ if can_import("llama_cpp"):
110
+ from llama_cpp import Llama
111
+
112
+ print(f"LLAMA : {LLAMA_MODEL_PATH}" + "\n")
113
+ assert os.path.exists(LLAMA_MODEL_PATH), "\033[91m\033[1m" + f"Model can't be found." + "\033[0m\033[0m"
114
+
115
+ CTX_MAX = 1024
116
+ LLAMA_THREADS_NUM = int(os.getenv("LLAMA_THREADS_NUM", 8))
117
+
118
+ print('Initialize model for evaluation')
119
+ llm = Llama(
120
+ model_path=LLAMA_MODEL_PATH,
121
+ n_ctx=CTX_MAX,
122
+ n_threads=LLAMA_THREADS_NUM,
123
+ n_batch=512,
124
+ use_mlock=False,
125
+ )
126
+
127
+ print('\nInitialize model for embedding')
128
+ llm_embed = Llama(
129
+ model_path=LLAMA_MODEL_PATH,
130
+ n_ctx=CTX_MAX,
131
+ n_threads=LLAMA_THREADS_NUM,
132
+ n_batch=512,
133
+ embedding=True,
134
+ use_mlock=False,
135
+ )
136
+
137
+ print(
138
+ "\033[91m\033[1m"
139
+ + "\n*****USING LLAMA.CPP. POTENTIALLY SLOW.*****"
140
+ + "\033[0m\033[0m"
141
+ )
142
+ else:
143
+ print(
144
+ "\033[91m\033[1m"
145
+ + "\nLlama LLM requires package llama-cpp. Falling back to GPT-3.5-turbo."
146
+ + "\033[0m\033[0m"
147
+ )
148
+ LLM_MODEL = "gpt-3.5-turbo"
149
+
150
+ if LLM_MODEL.startswith("gpt-4"):
151
+ print(
152
+ "\033[91m\033[1m"
153
+ + "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
154
+ + "\033[0m\033[0m"
155
+ )
156
+
157
+ if LLM_MODEL.startswith("human"):
158
+ print(
159
+ "\033[91m\033[1m"
160
+ + "\n*****USING HUMAN INPUT*****"
161
+ + "\033[0m\033[0m"
162
+ )
163
+
164
+ print("\033[94m\033[1m" + "\n*****OBJECTIVE*****\n" + "\033[0m\033[0m")
165
+ print(f"{OBJECTIVE}")
166
+
167
+ if not JOIN_EXISTING_OBJECTIVE:
168
+ print("\033[93m\033[1m" + "\nInitial task:" + "\033[0m\033[0m" + f" {INITIAL_TASK}")
169
+ else:
170
+ print("\033[93m\033[1m" + f"\nJoining to help the objective" + "\033[0m\033[0m")
171
+
172
+ # Configure OpenAI
173
+ openai.api_key = os.getenv("api_key")
174
+
175
+
176
# Llama embedding function
class LlamaEmbeddingFunction(EmbeddingFunction):
    """Chroma embedding function backed by the module-level llama.cpp embedder."""

    def __init__(self):
        # Nothing to configure; the shared llm_embed model is built at module scope.
        return

    def __call__(self, texts: Documents) -> Embeddings:
        # Embed each document independently through the shared llama.cpp model.
        return [llm_embed.embed(text) for text in texts]
188
+
189
+
190
# Results storage using local ChromaDB
class DefaultResultsStorage:
    """Persist task results in a local Chroma collection and query them back."""

    def __init__(self):
        # Chroma logs verbosely by default; keep only errors.
        logging.getLogger('chromadb').setLevel(logging.ERROR)

        # Create Chroma collection, persisted under ./chroma.
        persist_dir = "chroma"
        chroma_client = chromadb.PersistentClient(
            settings=chromadb.config.Settings(
                persist_directory=persist_dir,
            )
        )

        # Choose the embedding backend that matches the configured LLM.
        if LLM_MODEL.startswith("llama"):
            embedder = LlamaEmbeddingFunction()
        else:
            embedder = OpenAIEmbeddingFunction(api_key=OPENAI_API_KEY)

        self.collection = chroma_client.get_or_create_collection(
            name=RESULTS_STORE_NAME,
            metadata={"hnsw:space": "cosine"},
            embedding_function=embedder,
        )

    def add(self, task: Dict, result: str, result_id: str):
        """Store (or overwrite) one task result under *result_id*."""
        # Human mode produces nothing worth persisting.
        if LLM_MODEL.startswith("human"):
            return

        # Chroma computes embeddings itself except in llama mode, where the
        # local llama.cpp model supplies them.
        embeddings = llm_embed.embed(result) if LLM_MODEL.startswith("llama") else None
        metadatas = {"task": task["task_name"], "result": result}

        # Update an existing record, otherwise insert a fresh one.
        exists = len(self.collection.get(ids=[result_id], include=[])["ids"]) > 0
        write = self.collection.update if exists else self.collection.add
        write(
            ids=result_id,
            embeddings=embeddings,
            documents=result,
            metadatas=metadatas,
        )

    def query(self, query: str, top_results_num: int) -> List[dict]:
        """Return task names of stored results most similar to *query*."""
        count: int = self.collection.count()
        if count == 0:
            return []
        results = self.collection.query(
            query_texts=query,
            n_results=min(top_results_num, count),
            include=["metadatas"]
        )
        return [item["task"] for item in results["metadatas"][0]]
248
+
249
+
250
# Initialize results storage
def try_weaviate():
    """Return a Weaviate-backed results store when configured, else None."""
    weaviate_url = os.getenv("WEAVIATE_URL", "")
    use_embedded = os.getenv("WEAVIATE_USE_EMBEDDED", "False").lower() == "true"
    # Weaviate is opt-in: bail out unless a URL or embedded mode is requested.
    if not (weaviate_url or use_embedded):
        return None
    if not can_import("extensions.weaviate_storage"):
        return None
    api_key = os.getenv("WEAVIATE_API_KEY", "")
    from extensions.weaviate_storage import WeaviateResultsStorage
    print("\nUsing results storage: " + "\033[93m\033[1m" + "Weaviate" + "\033[0m\033[0m")
    return WeaviateResultsStorage(OPENAI_API_KEY, weaviate_url, api_key, use_embedded, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
260
+
261
def try_pinecone():
    """Return a Pinecone-backed results store when configured, else None."""
    api_key = os.getenv("PINECONE_API_KEY", "")
    # Pinecone is opt-in: require both an API key and the storage extension.
    if not (api_key and can_import("extensions.pinecone_storage")):
        return None
    environment = os.getenv("PINECONE_ENVIRONMENT", "")
    assert (
        environment
    ), "\033[91m\033[1m" + "PINECONE_ENVIRONMENT environment variable is missing from .env" + "\033[0m\033[0m"
    from extensions.pinecone_storage import PineconeResultsStorage
    print("\nUsing results storage: " + "\033[93m\033[1m" + "Pinecone" + "\033[0m\033[0m")
    return PineconeResultsStorage(OPENAI_API_KEY, api_key, environment, LLM_MODEL, LLAMA_MODEL_PATH, RESULTS_STORE_NAME, OBJECTIVE)
272
+
273
def use_chroma():
    """Fall back to the local Chroma results store (always available)."""
    banner = "\nUsing results storage: " + "\033[93m\033[1m" + "Chroma (Default)" + "\033[0m\033[0m"
    print(banner)
    return DefaultResultsStorage()


# First configured backend wins: Weaviate, then Pinecone, then local Chroma.
results_storage = try_weaviate() or try_pinecone() or use_chroma()
278
+
279
# Task storage supporting only a single instance of BabyAGI
class SingleTaskListStorage:
    """In-memory FIFO task list with a monotonically increasing task id."""

    def __init__(self):
        self.tasks = deque([])    # pending tasks, front of deque is next up
        self.task_id_counter = 0  # last id handed out by next_task_id()

    def append(self, task: Dict):
        """Queue *task* at the back of the list."""
        self.tasks.append(task)

    def replace(self, tasks: List[Dict]):
        """Drop the current queue and adopt *tasks* wholesale."""
        self.tasks = deque(tasks)

    def popleft(self):
        """Remove and return the task at the front of the queue."""
        return self.tasks.popleft()

    def is_empty(self):
        """Return True when no tasks are queued."""
        return not self.tasks

    def next_task_id(self):
        """Return the next sequential (1-based) task id."""
        self.task_id_counter += 1
        return self.task_id_counter

    def get_task_names(self):
        """Return the names of queued tasks, front to back."""
        return [task["task_name"] for task in self.tasks]
303
+
304
+
305
# Initialize tasks storage
tasks_storage = SingleTaskListStorage()

# Local cooperative mode swaps in a Ray-backed shared task list when the
# extension is importable; distributed mode is not implemented yet.
if COOPERATIVE_MODE in ['l', 'local'] and can_import("extensions.ray_tasks"):
    import sys
    from pathlib import Path

    # Make the extensions package next to this file importable.
    sys.path.append(str(Path(__file__).resolve().parent))
    from extensions.ray_tasks import CooperativeTaskListStorage

    tasks_storage = CooperativeTaskListStorage(OBJECTIVE)
    print("\nReplacing tasks storage: " + "\033[93m\033[1m" + "Ray" + "\033[0m\033[0m")
elif COOPERATIVE_MODE in ['d', 'distributed']:
    pass
319
+
320
+
321
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
    """Truncate *string* to at most *limit* tokens, estimated with tiktoken.

    Falls back to the gpt2 tokenizer when *model* has no registered encoding.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit. tiktoken raises KeyError for unknown model names.
        encoding = tiktoken.encoding_for_model('gpt2')  # Fallback for others.

    encoded = encoding.encode(string)
    return encoding.decode(encoded[:limit])
332
+
333
+
334
def openai_call(
    prompt: str,
    model: str = LLM_MODEL,
    temperature: float = OPENAI_TEMPERATURE,
    max_tokens: int = 100,
):
    """Send `prompt` to the Groq chat-completions API and return the streamed reply.

    Args:
        prompt: User prompt to send to the model.
        model: Kept for interface compatibility; the Groq request below pins
            its own model name.  # NOTE(review): wire this through once the
            multi-backend dispatch is restored.
        temperature: Kept for interface compatibility (request hardcodes 1).
        max_tokens: Kept for interface compatibility (request hardcodes 1024).

    Returns:
        str: The concatenated streamed completion text.
    """
    # Bug fixes vs. the merged version:
    # - the `prompt` argument was ignored (a hard-coded literal was sent);
    # - a stray `messages=[...],` tuple assignment preceded the call;
    # - every chunk was printed twice (once with an extra newline);
    # - a large unreachable `while True` block followed `return res`,
    #   referencing undefined names (`llm`, a reused `completion`). Removed.
    client = Groq(api_key=os.getenv("api_key"))
    res = ""
    completion = client.chat.completions.create(
        model="llama3-8b-8192",
        messages=[
            {
                "role": "user",
                "content": prompt
            }
        ],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    for chunk in completion:
        piece = chunk.choices[0].delta.content or ""
        print(piece, end="")
        res += piece
    return res
451
+
452
+
453
def task_creation_agent(
        objective: str, result: Dict, task_description: str, task_list: List[str]
):
    """Ask the LLM for follow-up tasks based on the last task's result.

    Args:
        objective: Overall objective guiding task creation.
        result: Enriched result dict; only result["data"] is read here.
        task_description: Description of the task that just completed.
        task_list: Names of the still-incomplete tasks (used to avoid overlap).

    Returns:
        list[dict]: New tasks as {"task_name": ...} dicts (ids assigned later
        by the caller).
    """
    prompt = f"""
You are to use the result from an execution agent to create new tasks with the following objective: {objective}.
The last completed task has the result: \n{result["data"]}
This result was based on this task description: {task_description}.\n"""

    if task_list:
        prompt += f"These are incomplete tasks: {', '.join(task_list)}\n"
    prompt += "Based on the result, return a list of tasks to be completed in order to meet the objective. "
    if task_list:
        prompt += "These new tasks must not overlap with incomplete tasks. "

    prompt += """
Return one task per line in your response. The result must be a numbered list in the format:

#. First task
#. Second task

The number of each entry must be followed by a period. If your list is empty, write "There are no tasks to add at this time."
Unless your list is empty, do not include any headers before your numbered list or follow your numbered list with any other output."""

    print(f'\n*****TASK CREATION AGENT PROMPT****\n{prompt}\n')
    response = openai_call(prompt, max_tokens=2000)
    print(f'\n****TASK CREATION AGENT RESPONSE****\n{response}\n')
    new_tasks = response.split('\n')
    new_tasks_list = []
    for task_string in new_tasks:
        # Expect "N. task name"; split once on the first period.
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
            # Strip punctuation from the task name, keep word chars/whitespace.
            task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
            if task_name.strip() and task_id.isnumeric():
                new_tasks_list.append(task_name)
            # print('New task created: ' + task_name)

    out = [{"task_name": task_name} for task_name in new_tasks_list]
    return out
492
+
493
+
494
def prioritization_agent():
    """Re-rank the shared task list against OBJECTIVE via the LLM.

    Returns:
        list[dict] | None: Tasks as {"task_id", "task_name"} dicts in the new
        priority order, or None when the LLM returned an empty response (the
        caller keeps the existing list in that case).
    """
    task_names = tasks_storage.get_task_names()
    bullet_string = '\n'

    prompt = f"""
You are tasked with prioritizing the following tasks: {bullet_string + bullet_string.join(task_names)}
Consider the ultimate objective of your team: {OBJECTIVE}.
Tasks should be sorted from highest to lowest priority, where higher-priority tasks are those that act as pre-requisites or are more essential for meeting the objective.
Do not remove any tasks. Return the ranked tasks as a numbered list in the format:

#. First task
#. Second task

The entries must be consecutively numbered, starting with 1. The number of each entry must be followed by a period.
Do not include any headers before your ranked list or follow your list with any other output."""

    print(f'\n****TASK PRIORITIZATION AGENT PROMPT****\n{prompt}\n')
    response = openai_call(prompt, max_tokens=2000)
    print(f'\n****TASK PRIORITIZATION AGENT RESPONSE****\n{response}\n')
    if not response:
        # Bug fix: log message had a typo ("priotritization").
        print('Received empty response from prioritization agent. Keeping task list unchanged.')
        return
    new_tasks = response.split("\n") if "\n" in response else [response]
    new_tasks_list = []
    for task_string in new_tasks:
        # Expect "N. task name"; split once on the first period.
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = ''.join(s for s in task_parts[0] if s.isnumeric())
            task_name = re.sub(r'[^\w\s_]+', '', task_parts[1]).strip()
            if task_name.strip():
                new_tasks_list.append({"task_id": task_id, "task_name": task_name})

    return new_tasks_list
527
+
528
+
529
# Execute a task based on the objective and five previous tasks
def execution_agent(objective: str, task: str) -> str:
    """
    Executes a task based on the given objective and previous context.

    Args:
        objective (str): The objective or goal for the AI to perform the task.
        task (str): The task to be executed by the AI.

    Returns:
        str: The response generated by the AI for the given task.

    """
    # Ground the task in up to five previously completed, relevant results.
    context = context_agent(query=objective, top_results_num=5)
    parts = [f'Perform one task based on the following objective: {objective}.\n']
    if context:
        parts.append('Take into account these previously completed tasks:' + '\n'.join(context))
    parts.append(f'\nYour task: {task}\nResponse:')
    return openai_call(''.join(parts), max_tokens=2000)
552
+
553
+
554
# Get the top n completed tasks for the objective
def context_agent(query: str, top_results_num: int):
    """
    Retrieves context for a given query from an index of tasks.

    Args:
        query (str): The query or objective for retrieving context.
        top_results_num (int): The number of top results to retrieve.

    Returns:
        list: A list of tasks as context for the given query, sorted by relevance.

    """
    # Delegates relevance ranking to the configured results storage backend.
    results = results_storage.query(query=query, top_results_num=top_results_num)
    # print("****RESULTS****")
    # print(results)
    return results
571
+
572
+
573
# Add the initial task if starting new objective
if not JOIN_EXISTING_OBJECTIVE:
    # Seed the queue with INITIAL_TASK so the main loop has work to start from;
    # when joining an existing objective, the shared storage already has tasks.
    initial_task = {
        "task_id": tasks_storage.next_task_id(),
        "task_name": INITIAL_TASK
    }
    tasks_storage.append(initial_task)
580
+
581
+
582
def main():
    """Main BabyAGI loop.

    Repeatedly: pop the next task, execute it against OBJECTIVE, ask the
    task-creation agent for follow-ups, and (unless joining an existing
    objective) re-prioritize the queue. Exits when the queue is empty.
    """
    loop = True
    while loop:
        # As long as there are tasks in the storage...
        if not tasks_storage.is_empty():
            # Print the task list
            print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m\033[0m")
            for t in tasks_storage.get_task_names():
                print(" • " + str(t))

            # Step 1: Pull the first incomplete task
            task = tasks_storage.popleft()
            print("\033[92m\033[1m" + "\n*****NEXT TASK*****\n" + "\033[0m\033[0m")
            print(str(task["task_name"]))

            # Send to execution function to complete the task based on the context
            result = execution_agent(OBJECTIVE, str(task["task_name"]))
            print("\033[93m\033[1m" + "\n*****TASK RESULT*****\n" + "\033[0m\033[0m")
            print(result)

            # Step 2: Enrich result and store in the results storage
            # This is where you should enrich the result if needed
            enriched_result = {
                "data": result
            }
            # extract the actual result from the dictionary
            # since we don't do enrichment currently
            # vector = enriched_result["data"]

            result_id = f"result_{task['task_id']}"

            # NOTE(review): result storage is disabled here, so context_agent
            # queries never see completed tasks — confirm this is intentional.
            #results_storage.add(task, result, result_id)

            # Step 3: Create new tasks and re-prioritize task list
            # only the main instance in cooperative mode does that
            new_tasks = task_creation_agent(
                OBJECTIVE,
                enriched_result,
                task["task_name"],
                tasks_storage.get_task_names(),
            )

            print('Adding new tasks to task_storage')
            for new_task in new_tasks:
                new_task.update({"task_id": tasks_storage.next_task_id()})
                print(str(new_task))
                tasks_storage.append(new_task)

            if not JOIN_EXISTING_OBJECTIVE:
                prioritized_tasks = prioritization_agent()
                # Keep the old queue when the agent returned nothing usable.
                if prioritized_tasks:
                    tasks_storage.replace(prioritized_tasks)

            # Sleep a bit before checking the task list again
            time.sleep(5)
        else:
            print('Done.')
            loop = False
640
+
641
+
642
# Script entry point.
if __name__ == "__main__":
    main()
babyagi/babycoder/README.md ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Babycoder: Recipe for using BabyAgi to write code
2
+
3
Babycoder is a work-in-progress AI system that is able to write code for small programs given a simple objective. As a part of the BabyAgi system, Babycoder's goal is to lay the foundation for creating increasingly powerful AI agents capable of managing larger and more complex projects.
4
+
5
+ ## Objective
6
+
7
+ The primary objective of Babycoder is to provide a recipe for developing AI agent systems capable of writing and editing code. By starting with a simple system and iterating on it, Babycoder aims to improve over time and eventually handle more extensive projects.
8
+
9
+ ## How It Works
10
+
11
+ <p align="center">
12
+ <img src="https://user-images.githubusercontent.com/115842157/235815563-8e7a9082-f8d7-48fa-b062-59b585944f1b.png" />
13
+ </p>
14
+
15
+ Babycoder's task management system consists of several AI agents working together to create, prioritize, and execute tasks based on a predefined objective and the current state of the project being worked on. The process consists of the following steps:
16
+
17
+ 1. **Task Definition**: Four task agents define tasks in a JSON list, which includes all tasks to be executed by the system.
18
+
19
2. **(Optional) Human feedback**: If enabled, allows the user to provide feedback for each task before it is executed. The feedback is processed by an agent responsible for applying it to improve the task.
20
+
21
+ 3. **Agent Assignment**: For each task, two agents collaborate to determine the agent responsible for executing the task. The possible executor agents are:
22
+ - `command_executor_agent`
23
+ - `code_writer_agent`
24
+ - `code_refactor_agent`
25
+
26
+ 4. **File Management**: The `files_management_agent` scans files in the project directory to determine which files or folders will be used by the executor agents to accomplish their tasks.
27
+
28
+ 5. **Task Execution**: The executor agents perform their assigned tasks using the following capabilities:
29
+ - The `command_executor_agent` runs OS commands, such as installing dependencies or creating files and folders.
30
+ - The `code_writer_agent` writes new code or updates existing code, using embeddings of the current codebase to retrieve relevant code sections and ensure compatibility with other parts of the codebase.
31
+ - The `code_refactor_agent` edits existing code according to the specified task, with the help of a `code_relevance_agent` that analyzes code chunks and identifies the most relevant section for editing.
32
+
33
+ The code is written to a folder called `playground` in Babycoder's root directory. A folder named `playground_data` is used to save embeddings of the code being written.
34
+
35
+ ## How to use
36
+
37
+ - Configure BabyAgi by following the instructions in the main README file.
38
+ - Navigate to the babycoder directory: `cd babycoder`
39
+ - Make a copy of the objective.sample.txt file (`cp objective.sample.txt objective.txt`) and update it to contain the objective of the project you want to create.
40
+ - Finally, from the `./babycoder` directory, run: `python babycoder.py` and watch it write code for you!
babyagi/babycoder/babycoder.py ADDED
@@ -0,0 +1,610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import openai
3
+ import time
4
+ import sys
5
+ from typing import List, Dict, Union
6
+ from dotenv import load_dotenv
7
+ import json
8
+ import subprocess
9
+ import platform
10
+
11
+ from embeddings import Embeddings
12
+
13
# Set Variables
load_dotenv()
current_directory = os.getcwd()
os_version = platform.release()

# Shared retry state for openai_call (module-level, single-threaded use).
openai_calls_retried = 0
max_openai_calls_retries = 3

# Set API Keys
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
openai.api_key = OPENAI_API_KEY

OPENAI_API_MODEL = os.getenv("OPENAI_API_MODEL", "gpt-3.5-turbo")
assert OPENAI_API_MODEL, "OPENAI_API_MODEL environment variable is missing from .env"

if "gpt-4" in OPENAI_API_MODEL.lower():
    print(
        f"\033[91m\033[1m"
        + "\n*****USING GPT-4. POTENTIALLY EXPENSIVE. MONITOR YOUR COSTS*****"
        + "\033[0m\033[0m"
    )

# Bug fix: OBJECTIVE was never assigned (NameError at the assert below) when
# no CLI argument was given and objective.txt did not exist; default it so
# the assert produces the intended "OBJECTIVE missing" message instead.
OBJECTIVE = ""
# Objective comes from argv[1], else from ./objective.txt.
if len(sys.argv) > 1:
    OBJECTIVE = sys.argv[1]
elif os.path.exists(os.path.join(current_directory, "objective.txt")):
    with open(os.path.join(current_directory, "objective.txt")) as f:
        OBJECTIVE = f.read()

assert OBJECTIVE, "OBJECTIVE missing"
43
+
44
+ ## Start of Helper/Utility functions ##
45
+
46
def print_colored_text(text, color):
    """Print `text` wrapped in the ANSI escape code for `color`.

    Unknown colors fall back to no color prefix; the reset code is always
    appended.
    """
    ansi_codes = {
        'blue': '\033[34m',
        'red': '\033[31m',
        'yellow': '\033[33m',
        'green': '\033[32m',
    }
    prefix = ansi_codes.get(color.lower(), '')
    print(f"{prefix}{text}\033[0m")
56
+
57
def print_char_by_char(text, delay=0.00001, chars_at_once=3):
    """Stream `text` to stdout in small chunks to mimic typing, then newline."""
    for start in range(0, len(text), chars_at_once):
        print(text[start:start + chars_at_once], end='', flush=True)
        time.sleep(delay)
    print()
63
+
64
def openai_call(
    prompt: str,
    model: str = OPENAI_API_MODEL,
    temperature: float = 0.5,
    max_tokens: int = 100,
):
    """Call OpenAI with `prompt` and return the stripped completion text.

    Non-"gpt-" models use the legacy completions endpoint; chat models use
    the chat endpoint with up to `max_openai_calls_retries` retries on error.

    Raises:
        Exception: The last API error, once retries are exhausted.
    """
    global openai_calls_retried
    if not model.startswith("gpt-"):
        # Use completion API
        response = openai.Completion.create(
            engine=model,
            prompt=prompt,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        return response.choices[0].text.strip()
    else:
        # Use chat completion API
        messages = [{"role": "user", "content": prompt}]
        try:
            response = openai.ChatCompletion.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                n=1,
                stop=None,
            )
            openai_calls_retried = 0
            return response.choices[0].message.content.strip()
        except Exception as e:
            # try again
            if openai_calls_retried < max_openai_calls_retries:
                openai_calls_retried += 1
                print(f"Error calling OpenAI. Retrying {openai_calls_retried} of {max_openai_calls_retries}...")
                return openai_call(prompt, model, temperature, max_tokens)
            # Bug fix: previously fell through and returned None silently after
            # exhausting retries; reset the counter and surface the error.
            openai_calls_retried = 0
            raise
103
+
104
def execute_command_json(json_string):
    """Run the shell command found under the 'command' key of a JSON string.

    The command executes inside ./playground with a 60-second timeout.
    Returns stdout on a zero exit code, stderr on a non-zero one, or an
    "Error: ..." string for decode/timeout/other failures.
    """
    try:
        parsed = json.loads(json_string)
        command_line = parsed.get('command')

        process = subprocess.Popen(
            command_line,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            shell=True,
            cwd='playground',
        )
        stdout, stderr = process.communicate(timeout=60)

        if process.returncode == 0:
            return stdout
        return stderr

    except json.JSONDecodeError as e:
        return f"Error: Unable to decode JSON string: {str(e)}"
    except subprocess.TimeoutExpired:
        process.terminate()
        return "Error: Timeout reached (60 seconds)"
    except Exception as e:
        return f"Error: {str(e)}"
126
+
127
def execute_command_string(command_string):
    """Run a shell command inside ./playground and return its text output.

    Returns stdout if non-empty, otherwise stderr, otherwise "No output";
    any raised exception is converted to an "Error: ..." string.
    """
    try:
        completed = subprocess.run(
            command_string,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
            shell=True,
            cwd='playground',
        )
        return completed.stdout or completed.stderr or "No output"
    except Exception as e:
        return f"Error: {str(e)}"
135
+
136
def save_code_to_file(code: str, file_path: str):
    """Append (or create) `code` in playground/<file_path>, best-effort.

    Writes are fire-and-forget: filesystem errors are swallowed so a failed
    save does not abort the agent loop.
    """
    full_path = os.path.join(current_directory, "playground", file_path)
    try:
        # Append when the file exists so successive snippets accumulate.
        mode = 'a' if os.path.exists(full_path) else 'w'
        with open(full_path, mode, encoding='utf-8') as f:
            f.write(code + '\n\n')
    except OSError:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and programming errors; only
        # filesystem failures are intentionally ignored here.
        pass
144
+
145
def refactor_code(modified_code: List[Dict[str, Union[int, str]]], file_path: str):
    """Apply line-range replacements to playground/<file_path> in place.

    Each modification dict carries 1-based inclusive "start_line"/"end_line"
    bounds and the replacement text under "modified_code".

    NOTE(review): modifications are applied sequentially against the evolving
    buffer, so a later modification's line numbers are only correct if earlier
    modifications did not change the line count above it — confirm callers
    account for this.
    """
    full_path = os.path.join(current_directory, "playground", file_path)

    with open(full_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    for modification in modified_code:
        start_line = modification["start_line"]
        end_line = modification["end_line"]
        modified_chunk = modification["modified_code"].splitlines()

        # Remove original lines within the range
        del lines[start_line - 1:end_line]

        # Insert the new modified_chunk lines
        for i, line in enumerate(modified_chunk):
            lines.insert(start_line - 1 + i, line + "\n")

    with open(full_path, "w", encoding="utf-8") as f:
        f.writelines(lines)
165
+
166
def split_code_into_chunks(file_path: str, chunk_size: int = 50) -> List[Dict[str, Union[int, str]]]:
    """Read playground/<file_path> and split it into `chunk_size`-line chunks.

    Returns dicts with 1-based inclusive "start_line"/"end_line" bounds and
    the chunk's text under "code".
    """
    full_path = os.path.join(current_directory, "playground", file_path)

    with open(full_path, "r", encoding="utf-8") as f:
        lines = f.readlines()

    return [
        {
            "start_line": offset + 1,
            "end_line": min(offset + chunk_size, len(lines)),
            "code": "".join(lines[offset:offset + chunk_size]),
        }
        for offset in range(0, len(lines), chunk_size)
    ]
179
+
180
+ ## End of Helper/Utility functions ##
181
+
182
+ ## TASKS AGENTS ##
183
+
184
def code_tasks_initializer_agent(objective: str):
    """Draft the initial JSON task checklist for `objective` via the LLM.

    Returns:
        str: Raw model output, expected (but not guaranteed) to be JSON of the
        form {"tasks": [{"id", "description", "file_path"}, ...]}; callers
        must still validate/parse it.
    """
    prompt = f"""You are an AGI agent responsible for creating a detailed JSON checklist of tasks that will guide other AGI agents to complete a given programming objective. Your task is to analyze the provided objective and generate a well-structured checklist with a clear starting point and end point, as well as tasks broken down to be very specific, clear, and executable by other agents without the context of other tasks.

    The current agents work as follows:
    - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
    - code_refactor_agent: Responsible for modifying and refactoring existing code to meet the requirements of the task.
    - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.

    Keep in mind that the agents cannot open files in text editors, and tasks should be designed to work within these agent capabilities.

    Here is the programming objective you need to create a checklist for: {objective}.

    To generate the checklist, follow these steps:

    1. Analyze the objective to identify the high-level requirements and goals of the project. This will help you understand the scope and create a comprehensive checklist.

    2. Break down the objective into smaller, highly specific tasks that can be worked on independently by other agents. Ensure that the tasks are designed to be executed by the available agents (code_writer_agent, code_refactor and command_executor_agent) without requiring opening files in text editors.

    3. Assign a unique ID to each task for easy tracking and organization. This will help the agents to identify and refer to specific tasks in the checklist.

    4. Organize the tasks in a logical order, with a clear starting point and end point. The starting point should represent the initial setup or groundwork necessary for the project, while the end point should signify the completion of the objective and any finalization steps.

    5. Provide the current context for each task, which should be sufficient for the agents to understand and execute the task without referring to other tasks in the checklist. This will help agents avoid task duplication.

    6. Pay close attention to the objective and make sure the tasks implement all necessary pieces needed to make the program work.

    7. Compile the tasks into a well-structured JSON format, ensuring that it is easy to read and parse by other AGI agents. The JSON should include fields such as task ID, description and file_path.

    IMPORTANT: BE VERY CAREFUL WITH IMPORTS AND MANAGING MULTIPLE FILES. REMEMBER EACH AGENT WILL ONLY SEE A SINGLE TASK. ASK YOURSELF WHAT INFORMATION YOU NEED TO INCLUDE IN THE CONTEXT OF EACH TASK TO MAKE SURE THE AGENT CAN EXECUTE THE TASK WITHOUT SEEING THE OTHER TASKS OR WHAT WAS ACCOMPLISHED IN OTHER TASKS.

    Pay attention to the way files are passed in the tasks, always use full paths. For example 'project/main.py'.

    Make sure tasks are not duplicated.

    Do not take long and complex routes, minimize tasks and steps as much as possible.

    Here is a sample JSON output for a checklist:

    {{
        "tasks": [
            {{
            "id": 1,
            "description": "Run a command to create the project directory named 'project'",
            "file_path": "./project",
            }},
            {{
            "id": 2,
            "description": "Run a command to Install the following dependencies: 'numpy', 'pandas', 'scikit-learn', 'matplotlib'",
            "file_path": "null",
            }},
            {{
            "id": 3,
            "description": "Write code to create a function named 'parser' that takes an input named 'input' of type str, [perform a specific task on it], and returns a specific output",
            "file_path": "./project/main.py",
            }},
            ...
            {{
            "id": N,
            "description": "...",
            }}
        ],
    }}

    The tasks will be executed by either of the three agents: command_executor, code_writer or code_refactor. They can't interact with programs. They can either run terminal commands or write code snippets. Their output is controlled by other functions to run the commands or save their output to code files. Make sure the tasks are compatible with the current agents. ALL tasks MUST start either with the following phrases: 'Run a command to...', 'Write code to...', 'Edit existing code to...' depending on the agent that will execute the task. RETURN JSON ONLY:"""

    # High temperature: we want a creative first draft of the plan.
    return openai_call(prompt, temperature=0.8, max_tokens=2000)
250
+
251
def code_tasks_refactor_agent(objective: str, task_list_json):
    """Adapt a drafted task list so each task is executable by the agents.

    Args:
        objective: The overall programming objective.
        task_list_json: The JSON task list produced by the initializer agent.

    Returns:
        str: Raw model output, expected to be the refactored JSON task list.
    """
    prompt = f"""You are an AGI tasks_refactor_agent responsible for adapting a task list generated by another agent to ensure the tasks are compatible with the current AGI agents. Your goal is to analyze the task list and make necessary modifications so that the tasks can be executed by the agents listed below

    YOU SHOULD OUTPUT THE MODIFIED TASK LIST IN THE SAME JSON FORMAT AS THE INITIAL TASK LIST. DO NOT CHANGE THE FORMAT OF THE JSON OUTPUT. DO NOT WRITE ANYTHING OTHER THAN THE MODIFIED TASK LIST IN THE JSON FORMAT.

    The current agents work as follows:
    - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
    - code_refactor_agent: Responsible for editing current existing code/files.
    - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.

    Here is the overall objective you need to refactor the tasks for: {objective}.
    Here is the JSON task list you need to refactor for compatibility with the current agents: {task_list_json}.

    To refactor the task list, follow these steps:
    1. Modify the task descriptions to make them compatible with the current agents, ensuring that the tasks are self-contained, clear, and executable by the agents without additional context. You don't need to mention the agents in the task descriptions, but the tasks should be compatible with the current agents.
    2. If necessary, add new tasks or remove irrelevant tasks to make the task list more suitable for the current agents.
    3. Keep the JSON structure of the task list intact, maintaining the "id", "description" and "file_path" fields for each task.
    4. Pay close attention to the objective and make sure the tasks implement all necessary pieces needed to make the program work.

    Always specify file paths to files. Make sure tasks are not duplicated. Never write code to create files. If needed, use commands to create files and folders.
    Return the updated JSON task list with the following format:

    {{
        "tasks": [
            {{
            "id": 1,
            "description": "Run a commmand to create a folder named 'project' in the current directory",
            "file_path": "./project",
            }},
            {{
            "id": 2,
            "description": "Write code to print 'Hello World!' with Python",
            "file_path": "./project/main.py",
            }},
            {{
            "id": 3,
            "description": "Write code to create a function named 'parser' that takes an input named 'input' of type str, [perform a specific task on it], and returns a specific output",
            "file_path": "./project/main.py",
            }}
            {{
            "id": 3,
            "description": "Run a command calling the script in ./project/main.py",
            "file_path": "./project/main.py",
            }}
            ...
        ],
    }}

    IMPORTANT: All tasks should start either with the following phrases: 'Run a command to...', 'Write a code to...', 'Edit the code to...' depending on the agent that will execute the task:

    ALWAYS ENSURE ALL TASKS HAVE RELEVANT CONTEXT ABOUT THE CODE TO BE WRITTEN, INCLUDE DETAILS ON HOW TO CALL FUNCTIONS, CLASSES, IMPORTS, ETC. AGENTS HAVE NO VIEW OF OTHER TASKS, SO THEY NEED TO BE SELF-CONTAINED. RETURN THE JSON:"""

    # Temperature 0: refactoring should be deterministic, not creative.
    return openai_call(prompt, temperature=0, max_tokens=2000)
304
+
305
def code_tasks_details_agent(objective: str, task_list_json):
    """Enrich each task's description so isolated agents can execute it.

    Args:
        objective: The overall programming objective.
        task_list_json: The refactored JSON task list.

    Returns:
        str: Raw model output, expected to be the same task list with
        expanded "description" fields.
    """
    prompt = f"""You are an AGI agent responsible for improving a list of tasks in JSON format and adding ALL the necessary details to each task. These tasks will be executed individually by agents that have no idea about other tasks or what code exists in the codebase. It is FUNDAMENTAL that each task has enough details so that an individual isolated agent can execute. The metadata of the task is the only information the agents will have.

    Each task should contain the details necessary to execute it. For example, if it creates a function, it needs to contain the details about the arguments to be used in that function and this needs to be consistent across all tasks.

    Look at all tasks at once, and update the task description adding details to it for each task so that it can be executed by an agent without seeing the other tasks and to ensure consistency across all tasks. DETAILS ARE CRUCIAL. For example, if one task creates a class, it should have all the details about the class, including the arguments to be used in the constructor. If another task creates a function that uses the class, it should have the details about the class and the arguments to be used in the constructor.

    RETURN JSON OUTPUTS ONLY.

    Here is the overall objective you need to refactor the tasks for: {objective}.
    Here is the task list you need to improve: {task_list_json}

    RETURN THE SAME TASK LIST but with the description improved to contain the details you is adding for each task in the list. DO NOT MAKE OTHER MODIFICATIONS TO THE LIST. Your input should go in the 'description' field of each task.

    RETURN JSON ONLY:"""
    return openai_call(prompt, temperature=0.7, max_tokens=2000)
321
+
322
def code_tasks_context_agent(objective: str, task_list_json):
    """Add an 'isolated_context' field to every task in the JSON task list.

    Downstream agents execute one task each with no view of the codebase or of
    sibling tasks, so this agent asks the LLM to embed the cross-task context
    (existing files/folders, required imports, existing functions/classes)
    directly into each task.

    Args:
        objective: The overall project objective the tasks serve.
        task_list_json: JSON text of the task list produced by the previous agent.

    Returns:
        The LLM response, expected to be the same task list as JSON text with a
        new 'isolated_context' string field per task.
    """
    prompt = f"""You are an AGI agent responsible for improving a list of tasks in JSON format and adding ALL the necessary context to it. These tasks will be executed individually by agents that have no idea about other tasks or what code exists in the codebase. It is FUNDAMENTAL that each task has enough context so that an individual isolated agent can execute. The metadata of the task is the only information the agents will have.

    Look at all tasks at once, and add the necessary context to each task so that it can be executed by an agent without seeing the other tasks. Remember, one agent can only see one task and has no idea about what happened in other tasks. CONTEXT IS CRUCIAL. For example, if one task creates one folder and the other tasks creates a file in that folder. The second tasks should contain the name of the folder that already exists and the information that it already exists.

    This is even more important for tasks that require importing functions, classes, etc. If a task needs to call a function or initialize a Class, it needs to have the detailed arguments, etc.

    Note that you should identify when imports need to happen and specify this in the context. Also, you should identify when functions/classes/etc already exist and specify this very clearly because the agents sometimes duplicate things not knowing.

    Always use imports with the file name. For example, 'from my_script import MyScript'.

    RETURN JSON OUTPUTS ONLY.

    Here is the overall objective you need to refactor the tasks for: {objective}.
    Here is the task list you need to improve: {task_list_json}

    RETURN THE SAME TASK LIST but with a new field called 'isolated_context' for each task in the list. This field should be a string with the context you are adding. DO NOT MAKE OTHER MODIFICATIONS TO THE LIST.

    RETURN JSON ONLY:"""
    return openai_call(prompt, temperature=0.7, max_tokens=2000)
342
+
343
def task_assigner_recommendation_agent(objective: str, task: str):
    """Ask the LLM which executor agent is best suited for a single task.

    Produces a free-text recommendation (not JSON); the final choice is made
    by task_assigner_agent, which receives this recommendation as guidance.

    Args:
        objective: The overall project objective, for context.
        task: A single task (text) from the generated checklist.

    Returns:
        The LLM's free-text recommendation of one of: code_writer_agent,
        code_refactor_agent, command_executor_agent.
    """
    prompt = f"""You are an AGI agent responsible for providing recommendations on which agent should be used to handle a specific task. Analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and suggest the most appropriate agent to work on the task.

    The overall objective is: {objective}
    The current task is: {task}

    The available agents are:
    1. code_writer_agent: Responsible for writing code based on the task description.
    2. code_refactor_agent: Responsible for editing existing code.
    3. command_executor_agent: Responsible for executing commands and handling file operations, such as creating, moving, or deleting files.

    When analyzing the task, consider the following tips:
    - Pay attention to keywords in the task description that indicate the type of action required, such as "write", "edit", "run", "create", "move", or "delete".
    - Keep the overall objective in mind, as it can help you understand the context of the task and guide your choice of agent.
    - If the task involves writing new code or adding new functionality, consider using the code_writer_agent.
    - If the task involves modifying or optimizing existing code, consider using the code_refactor_agent.
    - If the task involves file operations, command execution, or running a script, consider using the command_executor_agent.

    Based on the task and overall objective, suggest the most appropriate agent to work on the task."""
    return openai_call(prompt, temperature=0.5, max_tokens=2000)
363
+
364
def task_assigner_agent(objective: str, task: str, recommendation: str):
    """Pick the executor agent for a task, guided by a prior recommendation.

    Args:
        objective: The overall project objective, for context.
        task: A single task (text) from the generated checklist.
        recommendation: Free-text guidance from task_assigner_recommendation_agent.

    Returns:
        The LLM response, expected to be JSON of the form
        {{"agent": "agent_name"}} naming one of the three executor agents.
    """
    # Fixed the numbered list in the prompt: command_executor_agent was
    # mislabeled "2." (duplicating code_refactor_agent's number).
    prompt = f"""You are an AGI agent responsible for choosing the best agent to work on a given task. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and choose the best agent to work on the task.

    The overall objective is: {objective}
    The current task is: {task}

    Use this recommendation to guide you: {recommendation}

    The available agents are:
    1. code_writer_agent: Responsible for writing code based on the task description.
    2. code_refactor_agent: Responsible for editing existing code.
    3. command_executor_agent: Responsible for executing commands and handling file operations, such as creating, moving, or deleting files.

    Please consider the task description and the overall objective when choosing the most appropriate agent. Keep in mind that creating a file and writing code are different tasks. If the task involves creating a file, like "calculator.py" but does not mention writing any code inside it, the command_executor_agent should be used for this purpose. The code_writer_agent should only be used when the task requires writing or adding code to a file. The code_refactor_agent should only be used when the task requires modifying existing code.

    TLDR: To create files, use command_executor_agent, to write text/code to files, use code_writer_agent, to modify existing code, use code_refactor_agent.

    Choose the most appropriate agent to work on the task and return a JSON output with the following format: {{"agent": "agent_name"}}. ONLY return JSON output:"""
    return openai_call(prompt, temperature=0, max_tokens=2000)
383
+
384
def command_executor_agent(task: str, file_path: str):
    """Ask the LLM to produce the shell command that accomplishes a task.

    Relies on the module-level global `os_version` (defined elsewhere in this
    file) to tailor the command to the host OS.

    Args:
        task: A single task (text) from the generated checklist.
        file_path: Relative path of the file or folder referenced by the task.

    Returns:
        The LLM response, expected to be JSON of the form
        {{"command": "command_to_execute"}}.
    """
    prompt = f"""You are an AGI agent responsible for executing a given command on the {os_version} OS. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and execute the command on the {os_version} OS.

    The current task is: {task}
    File or folder name referenced in the task (relative file path): {file_path}

    Based on the task, write the appropriate command to execute on the {os_version} OS. Make sure the command is relevant to the task and objective. For example, if the task is to create a new folder, the command should be 'mkdir new_folder_name'. Return the command as a JSON output with the following format: {{"command": "command_to_execute"}}. ONLY return JSON output:"""
    return openai_call(prompt, temperature=0, max_tokens=2000)
392
+
393
def code_writer_agent(task: str, isolated_context: str, context_code_chunks):
    """Ask the LLM to write the code that accomplishes a task.

    Args:
        task: A single task (text) from the generated checklist.
        isolated_context: The task's 'isolated_context' string describing
            relevant parts of the codebase.
        context_code_chunks: Code chunks retrieved as relevant to the task
            (interpolated verbatim into the prompt).

    Returns:
        The LLM response, expected to be plain code text that the caller saves
        directly to a file.
    """
    # Fixed prompt typos: "identation" -> "indentation", duplicated
    # "in the in the" -> "in the", and the stray quote after IMPORTANT
    # replaced with a colon.
    prompt = f"""You are an AGI agent responsible for writing code to accomplish a given task. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and write the necessary code to complete the task.

    The current task is: {task}

    To help you make the code useful in this codebase, use this context as reference of the other pieces of the codebase that are relevant to your task. PAY ATTENTION TO THIS: {isolated_context}

    The following code chunks were found to be relevant to the task. You can use them as reference to write the code if they are useful. PAY CLOSE ATTENTION TO THIS:
    {context_code_chunks}

    Note: Always use 'encoding='utf-8'' when opening files with open().

    Based on the task and objective, write the appropriate code to achieve the task. Make sure the code is relevant to the task and objective, and follows best practices. Return the code as a plain text output and NOTHING ELSE. Use indentation and line breaks in the code. Make sure to only write the code and nothing else as your output will be saved directly to the file by other agent. IMPORTANT: If the task is asking you to write code to write files, this is a mistake! Interpret it and either do nothing or return the plain code, not a code to write file, not a code to write code, etc."""
    return openai_call(prompt, temperature=0, max_tokens=2000)
407
+
408
def code_refactor_agent(task_description: str, existing_code_snippet: str, context_chunks, isolated_context: str):
    """Ask the LLM to refactor an existing code snippet to accomplish a task.

    Args:
        task_description: The task (text) driving the refactor.
        existing_code_snippet: The code chunk selected for editing.
        context_chunks: Additional code chunks that may be relevant
            (interpolated verbatim into the prompt).
        isolated_context: The task's 'isolated_context' string describing
            relevant parts of the codebase.

    Returns:
        The LLM response, expected to be plain refactored code that the caller
        writes back into the file.
    """
    # Fixed prompt typo: "the task descriptionm and refactor" ->
    # "the task description, and refactor".
    prompt = f"""You are an AGI agent responsible for refactoring code to accomplish a given task. Your goal is to analyze the provided major objective of the project, the task description, and refactor the code accordingly.

    The current task description is: {task_description}
    To help you make the code useful in this codebase, use this context as reference of the other pieces of the codebase that are relevant to your task: {isolated_context}

    Here are some context chunks that might be relevant to the task:
    {context_chunks}

    Existing code you should refactor:
    {existing_code_snippet}

    Based on the task description, objective, refactor the existing code to achieve the task. Make sure the refactored code is relevant to the task and objective, follows best practices, etc.

    Return a plain text code snippet with your refactored code. IMPORTANT: JUST RETURN CODE, YOUR OUTPUT WILL BE ADDED DIRECTLY TO THE FILE BY OTHER AGENT. BE MINDFUL OF THIS:"""

    return openai_call(prompt, temperature=0, max_tokens=2000)
426
+
427
def file_management_agent(objective: str, task: str, current_directory_files: str, file_path: str):
    """Ask the LLM to resolve the file path/name for generated code.

    Args:
        objective: The overall project objective, for context.
        task: A single task (text) from the generated checklist.
        current_directory_files: Listing of the current directory (e.g. the
            output of `ls`), used to avoid duplicate/conflicting file names.
        file_path: The path suggested by the task, relative to the current dir.

    Returns:
        The LLM response, expected to be JSON of the form
        {{"file_path": "file_path_and_name"}}.
    """
    prompt = f"""You are an AGI agent responsible for managing files in a software project. Your goal is to analyze the provided major objective of the project and a single task from the JSON checklist generated by the previous agent, and determine the appropriate file path and name for the generated code.

    The overall objective is: {objective}
    The current task is: {task}
    Specified file path (relative path from the current dir): {file_path}

    Make the file path adapted for the current directory files. The current directory files are: {current_directory_files}. Assume this file_path will be interpreted from the root path of the directory.

    Do not use '.' or './' in the file path.

    BE VERY SPECIFIC WITH THE FILES, AVOID FILE DUPLICATION, AVOID SPECIFYING THE SAME FILE NAME UNDER DIFFERENT FOLDERS, ETC.

    Based on the task, determine the file path and name for the generated code. Return the file path and name as a JSON output with the following format: {{"file_path": "file_path_and_name"}}. ONLY return JSON output:"""
    return openai_call(prompt, temperature=0, max_tokens=2000)
442
+
443
def code_relevance_agent(objective: str, task_description: str, code_chunk: str):
    """Score how relevant a code chunk is to a task, on a 0-10 scale.

    Args:
        objective: The overall project objective, for context.
        task_description: The task (text) the chunk is scored against.
        code_chunk: The code chunk (with line numbers) to evaluate.

    Returns:
        A JSON string of the form {"relevance_score": "<score>"}. The score is
        whatever text the LLM returned, stripped — it is not validated as a
        number here; callers compare these values to rank chunks.
    """
    prompt = f"""You are an AGI agent responsible for evaluating the relevance of a code chunk in relation to a given task. Your goal is to analyze the provided major objective of the project, the task description, and the code chunk, and assign a relevance score from 0 to 10, where 0 is completely irrelevant and 10 is highly relevant.

    The overall objective is: {objective}
    The current task description is: {task_description}
    The code chunk is as follows (line numbers included):
    {code_chunk}

    Based on the task description, objective, and code chunk, assign a relevance score between 0 and 10 (inclusive) for the code chunk. DO NOT OUTPUT ANYTHING OTHER THAN THE RELEVANCE SCORE AS A NUMBER."""

    relevance_score = openai_call(prompt, temperature=0.5, max_tokens=50)

    return json.dumps({"relevance_score": relevance_score.strip()})
456
+
457
def task_human_input_agent(task: str, human_feedback: str):
    """Rewrite a task according to human feedback (optional review step).

    Used by the (commented-out) human-in-the-loop section of the main flow.

    Args:
        task: The current task text.
        human_feedback: Free-text feedback from the user; empty means keep the
            task unchanged, and "ignore"-style feedback yields <IGNORE_TASK>.

    Returns:
        The LLM response: either the (possibly adjusted) task text, which must
        start with 'Run a command to...', 'Write code to...', or
        'Edit existing code to...', or the literal string <IGNORE_TASK>.
    """
    prompt = f"""You are an AGI agent responsible for getting human input to improve the quality of tasks in a software project. Your goal is to analyze the provided task and adapt it based on the human's suggestions. The tasks should start with either 'Run a command to...', 'Write code to...', or 'Edit existing code to...' depending on the agent that will execute the task.

    For context, this task will be executed by other AGI agents with the following characteristics:
    - code_writer_agent: Writes code snippets or functions and saves them to the appropriate files. This agent can also append code to existing files if required.
    - code_refactor_agent: Responsible for modifying and refactoring existing code to meet the requirements of the task.
    - command_executor_agent: Executes terminal commands for tasks such as creating directories, installing dependencies, etc.

    The current task is:
    {task}

    The human feedback is:
    {human_feedback}

    If the human feedback is empty, return the task as is. If the human feedback is saying to ignore the task, return the following string: <IGNORE_TASK>

    Note that your output will replace the existing task, so make sure that your output is a valid task that starts with one of the required phrases ('Run a command to...', 'Write code to...', 'Edit existing code to...').

    Please adjust the task based on the human feedback while ensuring it starts with one of the required phrases ('Run a command to...', 'Write code to...', 'Edit existing code to...'). Return the improved task as a plain text output and nothing else. Write only the new task."""

    return openai_call(prompt, temperature=0.3, max_tokens=200)
478
+
479
## END OF AGENTS ##

# ---------------------------------------------------------------------------
# Main flow: build the task list with the planner agents, then execute each
# task with the executor agent chosen by the assigner.
# Relies on helpers defined elsewhere in this file: print_colored_text,
# print_char_by_char, openai_call-backed agents above, execute_command_json,
# execute_command_string, save_code_to_file, split_code_into_chunks,
# refactor_code, plus the globals OBJECTIVE and current_directory.
# ---------------------------------------------------------------------------

print_colored_text(f"****Objective****", color='green')
print_char_by_char(OBJECTIVE, 0.00001, 10)

# Create the tasks: initialize, refactor to fit the executor agents, then
# enrich with technical details and per-task isolated context.
print_colored_text("*****Working on tasks*****", "red")
print_colored_text(" - Creating initial tasks", "yellow")
task_agent_output = code_tasks_initializer_agent(OBJECTIVE)
print_colored_text(" - Reviewing and refactoring tasks to fit agents", "yellow")
task_agent_output = code_tasks_refactor_agent(OBJECTIVE, task_agent_output)
print_colored_text(" - Adding relevant technical details to the tasks", "yellow")
task_agent_output = code_tasks_details_agent(OBJECTIVE, task_agent_output)
print_colored_text(" - Adding necessary context to the tasks", "yellow")
task_agent_output = code_tasks_context_agent(OBJECTIVE, task_agent_output)
print()

print_colored_text("*****TASKS*****", "green")
print_char_by_char(task_agent_output, 0.00000001, 10)

# Task list — the planner output is expected to be JSON with a "tasks" array;
# json.loads raises if the LLM returned malformed JSON.
task_json = json.loads(task_agent_output)

embeddings = Embeddings(current_directory)

for task in task_json["tasks"]:
    task_description = task["description"]
    task_isolated_context = task["isolated_context"]

    print_colored_text("*****TASK*****", "yellow")
    print_char_by_char(task_description)
    print_colored_text("*****TASK CONTEXT*****", "yellow")
    print_char_by_char(task_isolated_context)

    # HUMAN FEEDBACK
    # Uncomment below to enable human feedback before each task. This can be used to improve the quality of the tasks,
    # skip tasks, etc. I believe it may be very relevant in future versions that may have more complex tasks and could
    # allow a ton of automation when working on large projects.
    #
    # Get user input as a feedback to the task_description
    # print_colored_text("*****TASK FEEDBACK*****", "yellow")
    # user_input = input("\n>:")
    # task_description = task_human_input_agent(task_description, user_input)
    # if task_description == "<IGNORE_TASK>":
    #     continue
    # print_colored_text("*****IMPROVED TASK*****", "green")
    # print_char_by_char(task_description)

    # Assign the task to an agent: a free-text recommendation first, then the
    # final JSON choice guided by that recommendation.
    task_assigner_recommendation = task_assigner_recommendation_agent(OBJECTIVE, task_description)
    task_agent_output = task_assigner_agent(OBJECTIVE, task_description, task_assigner_recommendation)

    print_colored_text("*****ASSIGN*****", "yellow")
    print_char_by_char(task_agent_output)

    chosen_agent = json.loads(task_agent_output)["agent"]

    if chosen_agent == "command_executor_agent":
        command_executor_output = command_executor_agent(task_description, task["file_path"])
        print_colored_text("*****COMMAND*****", "green")
        print_char_by_char(command_executor_output)

        command_execution_output = execute_command_json(command_executor_output)
    else:
        # CODE AGENTS
        if chosen_agent == "code_writer_agent":
            # Compute embeddings for the codebase
            # This will recompute embeddings for all files in the 'playground' directory
            print_colored_text("*****RETRIEVING RELEVANT CODE CONTEXT*****", "yellow")
            embeddings.compute_repository_embeddings()
            relevant_chunks = embeddings.get_relevant_code_chunks(task_description, task_isolated_context)

            current_directory_files = execute_command_string("ls")
            file_management_output = file_management_agent(OBJECTIVE, task_description, current_directory_files, task["file_path"])
            print_colored_text("*****FILE MANAGEMENT*****", "yellow")
            print_char_by_char(file_management_output)
            file_path = json.loads(file_management_output)["file_path"]

            code_writer_output = code_writer_agent(task_description, task_isolated_context, relevant_chunks)

            print_colored_text("*****CODE*****", "green")
            print_char_by_char(code_writer_output)

            # Save the generated code to the file the agent selected
            save_code_to_file(code_writer_output, file_path)

        elif chosen_agent == "code_refactor_agent":
            # The code refactor agent works with multiple agents:
            # For each task, the file_management_agent is used to select the file to edit. Then, the
            # code_relevance_agent is used to select the relevant code chunks from that file with the
            # goal of finding the code chunk that is most relevant to the task description. This is
            # the code chunk that will be edited. Finally, the code_refactor_agent is used to edit
            # the code chunk.

            current_directory_files = execute_command_string("ls")
            file_management_output = file_management_agent(OBJECTIVE, task_description, current_directory_files, task["file_path"])
            file_path = json.loads(file_management_output)["file_path"]

            print_colored_text("*****FILE MANAGEMENT*****", "yellow")
            print_char_by_char(file_management_output)

            # Split the code into chunks and get the relevance scores for each chunk
            code_chunks = split_code_into_chunks(file_path, 80)
            print_colored_text("*****ANALYZING EXISTING CODE*****", "yellow")
            relevance_scores = []
            for chunk in code_chunks:
                score = code_relevance_agent(OBJECTIVE, task_description, chunk["code"])
                relevance_scores.append(score)

            # Select the most relevant chunk
            # NOTE(review): scores are JSON strings compared lexically here,
            # not parsed numbers — confirm ranking behaves as intended.
            selected_chunk = sorted(zip(relevance_scores, code_chunks), key=lambda x: x[0], reverse=True)[0][1]

            # Refactor the code
            modified_code_output = code_refactor_agent(task_description, selected_chunk, context_chunks=[selected_chunk], isolated_context=task_isolated_context)

            # Extract the start_line and end_line of the selected chunk. This will be used to replace the code in the original file
            start_line = selected_chunk["start_line"]
            end_line = selected_chunk["end_line"]

            # Count the number of lines in the modified_code_output
            modified_code_lines = modified_code_output.count("\n") + 1
            # Create a dictionary with the necessary information for the refactor_code function
            # (end_line is recomputed from the modified code's length, not the
            # original chunk's end_line, so the replacement span matches the new code)
            modified_code_info = {
                "start_line": start_line,
                "end_line": start_line + modified_code_lines - 1,
                "modified_code": modified_code_output
            }
            print_colored_text("*****REFACTORED CODE*****", "green")
            print_char_by_char(modified_code_output)

            # Save the refactored code to the file
            refactor_code([modified_code_info], file_path)
babyagi/babycoder/embeddings.py ADDED
@@ -0,0 +1,207 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import csv
3
+ import shutil
4
+ import openai
5
+ import pandas as pd
6
+ import numpy as np
7
+ from transformers import GPT2TokenizerFast
8
+ from dotenv import load_dotenv
9
+ import time
10
+
11
# Heavily derived from OpenAI's cookbook example

# Load OPENAI_API_KEY (and any other settings) from a local .env file.
load_dotenv()

# the dir is the ./playground directory (resolved relative to this file)
REPOSITORY_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), "playground")
17
+
18
class Embeddings:
    """Chunk every file under the 'playground' repository, embed the chunks
    with the OpenAI Embeddings API, persist chunks and vectors as CSVs under
    <workspace_path>/playground_data, and answer similarity queries.

    Fixes over the original: all playground_data paths are built with
    os.path.join (the original hard-coded Windows '\\' separators, breaking on
    POSIX), bare `except:` clauses are narrowed to `except Exception`, and the
    repository-info CSV is written with utf-8 to match how files are read.
    """

    def __init__(self, workspace_path: str):
        self.workspace_path = workspace_path
        openai.api_key = os.getenv("OPENAI_API_KEY", "")

        self.DOC_EMBEDDINGS_MODEL = "text-embedding-ada-002"
        self.QUERY_EMBEDDINGS_MODEL = "text-embedding-ada-002"

        # Separator prepended to each selected chunk when building context.
        self.SEPARATOR = "\n* "

        self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        self.separator_len = len(self.tokenizer.tokenize(self.SEPARATOR))

    def _playground_data_file(self, filename: str) -> str:
        """Portable path to a file inside <workspace_path>/playground_data."""
        return os.path.join(self.workspace_path, "playground_data", filename)

    def compute_repository_embeddings(self):
        """Recompute chunk info and embeddings for the whole repository.

        Clears any previous playground_data contents, re-extracts file chunks,
        embeds each chunk, and loads the resulting embeddings into
        self.document_embeddings / self.df for later queries.
        """
        try:
            playground_data_path = os.path.join(self.workspace_path, "playground_data")

            # Delete the contents of the playground_data directory (but not the
            # directory itself) so no stale data survives between runs.
            for filename in os.listdir(playground_data_path):
                file_path = os.path.join(playground_data_path, filename)
                try:
                    if os.path.isfile(file_path) or os.path.islink(file_path):
                        os.unlink(file_path)
                    elif os.path.isdir(file_path):
                        shutil.rmtree(file_path)
                except Exception as e:
                    print(f"Failed to delete {file_path}. Reason: {str(e)}")
        except Exception as e:
            # Best-effort cleanup: a missing directory is not fatal.
            print(f"Error: {str(e)}")

        # Extract chunk info from the repository and persist it to CSV.
        info = self.extract_info(REPOSITORY_PATH)
        self.save_info_to_csv(info)

        repository_info_csv = self._playground_data_file("repository_info.csv")
        doc_embeddings_csv = self._playground_data_file("doc_embeddings.csv")

        df = pd.read_csv(repository_info_csv)
        df = df.set_index(["filePath", "lineCoverage"])
        self.df = df
        context_embeddings = self.compute_doc_embeddings(df)
        self.save_doc_embeddings_to_csv(context_embeddings, df, doc_embeddings_csv)

        try:
            self.document_embeddings = self.load_embeddings(doc_embeddings_csv)
        except Exception:
            # Best-effort: leave document_embeddings unset if the CSV can't be read.
            pass

    # Extract information from files in the repository in chunks
    # Return a list of [filePath, lineCoverage, chunkContent]
    def extract_info(self, REPOSITORY_PATH):
        """Walk REPOSITORY_PATH and split every readable file into chunks.

        Returns a list of (filePath, (first_line, last_line), chunk_text)
        tuples, where blank lines are dropped before chunking.
        """
        info = []

        LINES_PER_CHUNK = 60

        for root, dirs, files in os.walk(REPOSITORY_PATH):
            for file in files:
                file_path = os.path.join(root, file)

                # Skip files that are not valid utf-8 text.
                with open(file_path, "r", encoding="utf-8") as f:
                    try:
                        contents = f.read()
                    except Exception:
                        continue

                # Drop empty lines, then group into LINES_PER_CHUNK-line chunks.
                lines = [line for line in contents.split("\n") if line.strip()]
                chunks = [
                    lines[i:i + LINES_PER_CHUNK]
                    for i in range(0, len(lines), LINES_PER_CHUNK)
                ]
                for i, chunk in enumerate(chunks):
                    chunk = "\n".join(chunk)
                    # 1-based line span of this chunk within the stripped file.
                    first_line = i * LINES_PER_CHUNK + 1
                    last_line = first_line + len(chunk.split("\n")) - 1
                    line_coverage = (first_line, last_line)
                    info.append((os.path.join(root, file), line_coverage, chunk))

        return info

    def save_info_to_csv(self, info):
        """Write (filePath, lineCoverage, content) rows to repository_info.csv."""
        os.makedirs(os.path.join(self.workspace_path, "playground_data"), exist_ok=True)
        csv_path = self._playground_data_file("repository_info.csv")
        with open(csv_path, "w", newline="", encoding="utf-8") as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(["filePath", "lineCoverage", "content"])
            for file_path, line_coverage, content in info:
                writer.writerow([file_path, line_coverage, content])

    def get_relevant_code_chunks(self, task_description: str, task_context: str):
        """Return up to two chunk strings most similar to the task text."""
        query = task_description + "\n" + task_context
        most_relevant_document_sections = self.order_document_sections_by_query_similarity(query, self.document_embeddings)
        selected_chunks = []
        for _, section_index in most_relevant_document_sections:
            try:
                document_section = self.df.loc[section_index]
                selected_chunks.append(self.SEPARATOR + document_section['content'].replace("\n", " "))
                if len(selected_chunks) >= 2:
                    break
            except Exception:
                # Index may be missing from self.df; skip and try the next one.
                pass

        return selected_chunks

    def get_embedding(self, text: str, model: str) -> list[float]:
        """Fetch one embedding vector from the OpenAI API."""
        result = openai.Embedding.create(
            model=model,
            input=text
        )
        return result["data"][0]["embedding"]

    def get_doc_embedding(self, text: str) -> list[float]:
        return self.get_embedding(text, self.DOC_EMBEDDINGS_MODEL)

    def get_query_embedding(self, text: str) -> list[float]:
        return self.get_embedding(text, self.QUERY_EMBEDDINGS_MODEL)

    def compute_doc_embeddings(self, df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
        """
        Create an embedding for each row in the dataframe using the OpenAI Embeddings API.

        Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
        """
        embeddings = {}
        for idx, r in df.iterrows():
            # Crude rate limiting: wait one second between API calls.
            time.sleep(1)
            embeddings[idx] = self.get_doc_embedding(r.content.replace("\n", " "))
        return embeddings

    def save_doc_embeddings_to_csv(self, doc_embeddings: dict, df: pd.DataFrame, csv_filepath: str):
        """Persist embeddings as one row per chunk: filePath, lineCoverage, then one column per dimension."""
        if len(doc_embeddings) == 0:
            return

        # Dimensionality taken from the first embedding vector.
        EMBEDDING_DIM = len(list(doc_embeddings.values())[0])

        embeddings_df = pd.DataFrame(columns=["filePath", "lineCoverage"] + [f"{i}" for i in range(EMBEDDING_DIM)])

        for idx, _ in df.iterrows():
            embedding = doc_embeddings[idx]
            # idx is the (filePath, lineCoverage) tuple used as the df index.
            embeddings_df.loc[len(embeddings_df)] = [idx[0], idx[1]] + embedding

        embeddings_df.to_csv(csv_filepath, index=False)

    def vector_similarity(self, x: list[float], y: list[float]) -> float:
        # Dot product; OpenAI ada-002 embeddings are normalized, so this
        # behaves like cosine similarity.
        return np.dot(np.array(x), np.array(y))

    def order_document_sections_by_query_similarity(self, query: str, contexts: dict[(str, str), np.array]) -> list[(float, (str, str))]:
        """
        Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
        to find the most relevant sections.

        Return the list of document sections, sorted by relevance in descending order.
        """
        query_embedding = self.get_query_embedding(query)

        document_similarities = sorted([
            (self.vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
        ], reverse=True)

        return document_similarities

    def load_embeddings(self, fname: str) -> dict[tuple[str, str], list[float]]:
        """Load embeddings back from CSV as {(filePath, lineCoverage): vector}."""
        df = pd.read_csv(fname, header=0)
        max_dim = max([int(c) for c in df.columns if c != "filePath" and c != "lineCoverage"])
        return {
            (r.filePath, r.lineCoverage): [r[str(i)] for i in range(max_dim + 1)] for _, r in df.iterrows()
        }
babyagi/babycoder/objective.sample.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Create a Python program that consists of a single class named 'TemperatureConverter' in a file named 'temperature_converter.py'. The class should have the following methods:
2
+
3
+ - celsius_to_fahrenheit(self, celsius: float) -> float: Converts Celsius temperature to Fahrenheit.
4
+ - fahrenheit_to_celsius(self, fahrenheit: float) -> float: Converts Fahrenheit temperature to Celsius.
5
+
6
+ Create a separate 'main.py' file that imports the 'TemperatureConverter' class, takes user input for the temperature value and the unit, converts the temperature to the other unit, and then prints the result.
babyagi/classic/ABOUT.md ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ # BabyAGI Classic
2
+
3
+ This folder contains the classic version of BabyAGI as a single script. You can use this as a starting point for your own projects built on the original BabyAGI reasoning engine, if the mainline version is too complex for your needs.
babyagi/classic/BabyBeeAGI.py ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###### This is a modified version of OG BabyAGI, called BabyBeeAGI (future modifications will follow the pattern "Baby<animal>AGI"). This version requires GPT-4, it's very slow, and often errors out.######
2
######IMPORTANT NOTE: I'm sharing this as a framework to build on top of (with lots of room for improvement), to facilitate discussion around how to improve these. This is NOT for people who are looking for a complete solution that's ready to use. ######
3
+
4
+ import openai
5
+ import pinecone
6
+ import time
7
+ import requests
8
+ from bs4 import BeautifulSoup
9
+ from collections import deque
10
+ from typing import Dict, List
11
+ import re
12
+ import ast
13
+ import json
14
+ from serpapi import GoogleSearch
15
+
16
+ ### SET THESE 4 VARIABLES ##############################
17
+
18
+ # Add your API keys here
19
+ OPENAI_API_KEY = ""
20
+ SERPAPI_API_KEY = "" #If you include SERPAPI KEY, this will enable web-search. If you don't, it will automatically remove web-search capability.
21
+
22
+ # Set variables
23
+ OBJECTIVE = "You are an AI. Make the world a better place."
24
+ YOUR_FIRST_TASK = "Develop a task list."
25
+
26
+ ### UP TO HERE ##############################
27
+
28
+ # Configure OpenAI and SerpAPI client
29
+ openai.api_key = OPENAI_API_KEY
30
+ if SERPAPI_API_KEY:
31
+ serpapi_client = GoogleSearch({"api_key": SERPAPI_API_KEY})
32
+ websearch_var = "[web-search] "
33
+ else:
34
+ websearch_var = ""
35
+
36
# Shared mutable state for the whole session.
task_list = []
session_summary = ""


### Task list functions ##############################
def add_task(task: Dict):
    """Append a task record to the global task list."""
    task_list.append(task)


def get_task_by_id(task_id: int):
    """Return the task whose "id" equals task_id, or None when absent."""
    return next((t for t in task_list if t["id"] == task_id), None)


def get_completed_tasks():
    """Return every task whose status is "complete"."""
    return [t for t in task_list if t["status"] == "complete"]
54
+
55
### Tool functions ##############################
def text_completion_tool(prompt: str):
    """Run a single text-davinci-003 completion and return the stripped output text."""
    completion_params = {
        "engine": "text-davinci-003",
        "prompt": prompt,
        "temperature": 0.5,
        "max_tokens": 1500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    completion = openai.Completion.create(**completion_params)
    return completion.choices[0].text.strip()
67
+
68
def web_search_tool(query: str):
    """Search Google via SerpAPI and return the organic results as a string.

    Fix: the previous version indexed ``results["organic_results"]`` directly,
    which raised KeyError on SerpAPI errors or empty result pages; it now
    falls back to an empty list.
    """
    search_params = {
        "engine": "google",
        "q": query,
        "api_key": SERPAPI_API_KEY,
        "num": 3,
    }
    results = GoogleSearch(search_params).get_dict()
    # "organic_results" is absent on SerpAPI errors / empty pages.
    return str(results.get("organic_results", []))
79
+
80
def web_scrape_tool(url: str):
    """Fetch *url* and return its visible text followed by the https links it contains.

    Fix: removed the stray debug ``print(response)`` of the raw Response object.
    """
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")
    result = soup.get_text(strip=True)+"URLs: "
    # Append every absolute https link so follow-up tasks can scrape them.
    for link in soup.findAll('a', attrs={'href': re.compile("^https://")}):
        result+= link.get('href')+", "
    return result
88
+
89
### Agent functions ##############################
def execute_task(task, task_list, OBJECTIVE):
    """Run one task with its assigned tool, store its result, and hand the
    updated state to the task manager agent.

    Returns early (leaving the task incomplete) when its dependency has not
    completed yet.

    Fix: the old ``session_summary = overview_agent(...)`` assignment created a
    dead local that shadowed the global (overview_agent updates the global
    itself); the display-truncation check is also simplified.
    """
    global task_id_counter
    # Skip until the dependency (if any) has completed.
    if task["dependent_task_id"]:
        dependent_task = get_task_by_id(task["dependent_task_id"])
        if not dependent_task or dependent_task["status"] != "complete":
            return

    # Announce the task being executed.
    print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
    print(str(task['id'])+": "+str(task['task'])+" ["+str(task['tool']+"]"))

    # Build the prompt, including the dependency's result when present.
    task_prompt = f"Complete your assigned task based on the objective: {OBJECTIVE}. Your task: {task['task']}"
    if task["dependent_task_id"]:
        dependent_task_result = dependent_task["result"]
        task_prompt += f"\nThe previous task ({dependent_task['id']}. {dependent_task['task']}) result: {dependent_task_result}"
    task_prompt += "\nResponse:"

    # Dispatch to the configured tool.
    if task["tool"] == "text-completion":
        result = text_completion_tool(task_prompt)
    elif task["tool"] == "web-search":
        result = web_search_tool(task_prompt)
    elif task["tool"] == "web-scrape":
        result = web_scrape_tool(str(task['task']))
    else:
        result = "Unknown tool"

    # Show the result, truncated for display only.
    print("\033[93m\033[1m"+"\n*****TASK RESULT*****\n"+"\033[0m\033[0m")
    if len(result) > 2000:
        print(result[0:2000]+"...")
    else:
        print(result)

    # Update task status and result.
    task["status"] = "complete"
    task["result"] = result
    task["result_summary"] = summarizer_agent(result)

    # overview_agent updates the global session_summary itself; the previous
    # local assignment of its return value was a dead store.
    overview_agent(task["id"])

    task_id_counter += 1

    # Let the task manager reprioritize / extend the task list.
    task_manager_agent(
        OBJECTIVE,
        result,
        task["task"],
        [t["task"] for t in task_list if t["status"] == "incomplete"],
        task["id"]
    )
144
+
145
+
146
def task_manager_agent(objective: str, result: str, task_description: str, incomplete_tasks: List[str], current_task_id : int) -> List[Dict]:
    """Ask GPT-4 to clean up and reprioritize the global task list, then restore
    the result fields the model was told not to echo back.

    Fixes vs. the previous version:
    - the ``result`` parameter (the executed task's output) was shadowed by the
      model's raw reply, so the reply text got stored as a task result;
    - the current task was addressed by list index (``task_list[current_task_id]``),
      which is off by one because task IDs start at 1; it is now looked up by id.
    """
    global task_list
    original_task_list = task_list.copy()
    minified_task_list = [{k: v for k, v in task.items() if k != "result"} for task in task_list]
    result = result[0:4000]  # come up with better solution later.

    prompt = (
        f"You are a task management AI tasked with cleaning the formatting of and reprioritizing the following tasks: {minified_task_list}. "
        f"Consider the ultimate objective of your team: {OBJECTIVE}. "
        f"Do not remove any tasks. Return the result as a JSON-formatted list of dictionaries.\n"
        f"Create new tasks based on the result of last task if necessary for the objective. Limit tasks types to those that can be completed with the available tools listed below. Task description should be detailed."
        f"The maximum task list length is 7. Do not add an 8th task."
        f"The last completed task has the following result: {result}. "
        f"Current tool option is [text-completion] {websearch_var} and [web-scrape] only."# web-search is added automatically if SERPAPI exists
        f"For tasks using [web-scrape], provide only the URL to scrape as the task description. Do not provide placeholder URLs, but use ones provided by a search step or the initial objective."
        f"For tasks using [web-search], provide the search query, and only the search query to use (eg. not 'research waterproof shoes, but 'waterproof shoes')"
        f"dependent_task_id should always be null or a number."
        f"Do not reorder completed tasks. Only reorder and dedupe incomplete tasks.\n"
        f"Make sure all task IDs are in chronological order.\n"
        f"Do not provide example URLs for [web-scrape].\n"
        f"Do not include the result from the last task in the JSON, that will be added after..\n"
        f"The last step is always to provide a final summary report of all tasks.\n"
        f"An example of the desired output format is: "
        "[{\"id\": 1, \"task\": \"https://untapped.vc\", \"tool\": \"web-scrape\", \"dependent_task_id\": null, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 2, \"task\": \"Analyze the contents of...\", \"tool\": \"text-completion\", \"dependent_task_id\": 1, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 3, \"task\": \"Untapped Capital\", \"tool\": \"web-search\", \"dependent_task_id\": null, \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}]."
    )
    print("\033[90m\033[3m" + "\nRunning task manager agent...\n" + "\033[0m")
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are a task manager AI."},
            {"role": "user", "content": prompt}
        ],
        temperature=0.2,
        max_tokens=1500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # Keep the model's reply separate from the executed-task `result` parameter.
    reply = response["choices"][0]["message"]["content"]
    print("\033[90m\033[3m" + "\nDone!\n" + "\033[0m")
    try:
        task_list = json.loads(reply)
    except Exception as error:
        print(error)
    # Add the 'result' field back in (the model was told not to echo results).
    for updated_task, original_task in zip(task_list, original_task_list):
        if "result" in original_task:
            updated_task["result"] = original_task["result"]
    # Store the executed task's (truncated) result on the task itself, looked
    # up by id rather than by list position.
    current = next((t for t in task_list if t["id"] == current_task_id), None)
    if current is not None:
        current["result"] = result
    return task_list
206
+
207
+
208
+
209
def summarizer_agent(text: str) -> str:
    """Summarize (at most the first 4000 characters of) *text* with text-davinci-003."""
    completion = openai.Completion.create(
        engine="text-davinci-003",
        prompt=f"Please summarize the following text:\n{text[0:4000]}\nSummary:",
        temperature=0.5,
        max_tokens=100,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return completion.choices[0].text.strip()
222
+
223
+
224
def overview_agent(last_task_id: int) -> str:
    """Refresh and return the global session summary, folding in the most
    recently completed task."""
    global session_summary

    completed = get_completed_tasks()
    completed_text = "\n".join(
        f"{t['id']}. {t['task']} - {t['result_summary']}" for t in completed
    )

    prompt = f"Here is the current session summary:\n{session_summary}\nThe last completed task is task {last_task_id}. Please update the session summary with the information of the last task:\n{completed_text}\nUpdated session summary, which should describe all tasks in chronological order:"
    completion = openai.Completion.create(
        engine="text-davinci-003",
        prompt=prompt,
        temperature=0.5,
        max_tokens=200,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    session_summary = completion.choices[0].text.strip()
    return session_summary
244
+
245
+
246
### Main Loop ##############################

# Seed the task list with the user's first task.
add_task({
    "id": 1,
    "task": YOUR_FIRST_TASK,
    "tool": "text-completion",
    "dependent_task_id": None,
    "status": "incomplete",
    "result": "",
    "result_summary": "",
})

task_id_counter = 0

# Show the objective.
print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
print(OBJECTIVE)

# Keep working while any task is still incomplete.
while any(t["status"] == "incomplete" for t in task_list):
    pending = sorted(
        (t for t in task_list if t["status"] == "incomplete"),
        key=lambda t: t["id"],
    )
    if pending:
        # Execute the lowest-id pending task; the task manager runs inside.
        execute_task(pending[0], task_list, OBJECTIVE)

        # Print task list and session summary.
        print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m")
        for t in task_list:
            dependent_task = ""
            if t['dependent_task_id'] is not None:
                dependent_task = f"\033[31m<dependency: #{t['dependent_task_id']}>\033[0m"
            status_color = "\033[32m" if t['status'] == "complete" else "\033[31m"
            print(f"\033[1m{t['id']}\033[0m: {t['task']} {status_color}[{t['status']}]\033[0m \033[93m[{t['tool']}] {dependent_task}\033[0m")
        print("\033[93m\033[1m" + "\n*****SESSION SUMMARY*****\n" + "\033[0m\033[0m")
        print(session_summary)

    time.sleep(1)  # Sleep before checking the task list again

### Objective complete ##############################

# Print the full task list once nothing is left incomplete.
if all(t["status"] != "incomplete" for t in task_list):
    print("\033[92m\033[1m" + "\n*****ALL TASKS COMPLETED*****\n" + "\033[0m\033[0m")
    for t in task_list:
        print(f"ID: {t['id']}, Task: {t['task']}, Result: {t['result']}")
babyagi/classic/BabyCatAGI.py ADDED
@@ -0,0 +1,320 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###### This is a modified version of OG BabyAGI, called BabyCatAGI (future modifications will follow the pattern "Baby<animal>AGI"). This version requires GPT-4, it's very slow, and often errors out.######
2
######IMPORTANT NOTE: I'm sharing this as a framework to build on top of (with lots of room for improvement), to facilitate discussion around how to improve these. This is NOT for people who are looking for a complete solution that's ready to use. ######
3
+
4
+ import openai
5
+ import time
6
+ import requests
7
+ from bs4 import BeautifulSoup
8
+ from collections import deque
9
+ from typing import Dict, List
10
+ import re
11
+ import ast
12
+ import json
13
+ from serpapi import GoogleSearch
14
+
15
+ ### SET THESE 4 VARIABLES ##############################
16
+
17
+ # Add your API keys here
18
+ OPENAI_API_KEY = ""
19
SERPAPI_API_KEY = "" #If you include SERPAPI KEY, this will enable web-search. If you don't, it will automatically remove web-search capability.
20
+
21
+ # Set variables
22
+ OBJECTIVE = "Research experts at scaling NextJS and their Twitter accounts."
23
+ YOUR_FIRST_TASK = "Develop a task list." #you can provide additional instructions here regarding the task list.
24
+
25
+ ### UP TO HERE ##############################
26
+
27
+ # Configure OpenAI and SerpAPI client
28
+ openai.api_key = OPENAI_API_KEY
29
+ if SERPAPI_API_KEY:
30
+ serpapi_client = GoogleSearch({"api_key": SERPAPI_API_KEY})
31
+ websearch_var = "[web-search] "
32
+ else:
33
+ websearch_var = ""
34
+
35
# Session-wide mutable state.
task_list = []
session_summary = ""


### Task list functions ##############################
def add_task(task: Dict):
    """Record a new task on the global task list."""
    task_list.append(task)


def get_task_by_id(task_id: int):
    """Look up a task by its "id" field; None when no task matches."""
    for candidate in task_list:
        if candidate["id"] == task_id:
            return candidate
    return None


def get_completed_tasks():
    """All tasks currently marked "complete"."""
    return [t for t in task_list if t["status"] == "complete"]
53
+
54
+
55
# Print task list and session summary
def print_tasklist():
    """Pretty-print every task with colored status and dependency tags."""
    print("\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m")
    for t in task_list:
        deps = ""
        if t['dependent_task_ids']:
            deps = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in t['dependent_task_ids']])}>\033[0m"
        colour = "\033[32m" if t['status'] == "complete" else "\033[31m"
        print(f"\033[1m{t['id']}\033[0m: {t['task']} {colour}[{t['status']}]\033[0m \033[93m[{t['tool']}] {deps}\033[0m")
64
+
65
### Tool functions ##############################
def text_completion_tool(prompt: str):
    """Send *prompt* to gpt-3.5-turbo as a single user message and return the reply text."""
    chat_params = {
        "model": "gpt-3.5-turbo",
        "messages": [{"role": "user", "content": prompt}],
        "temperature": 0.2,
        "max_tokens": 1500,
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
    }
    response = openai.ChatCompletion.create(**chat_params)
    return response.choices[0].message['content'].strip()
82
+
83
+
84
def web_search_tool(query: str):
    """Search Google via SerpAPI, scrape each organic result, and concatenate
    the extracted notes into one string.

    Fix: the bare ``except:`` around the organic-results lookup used to hide
    real failures (rate limits, network errors) as "no results"; only the
    missing-key case is caught now.

    NOTE(review): relies on the module-level ``task`` global for scrape
    context — confirm this is intentional before refactoring.
    """
    search_params = {
        "engine": "google",
        "q": query,
        "api_key": SERPAPI_API_KEY,
        "num": 5  # edit this up or down for more results, though higher often results in OpenAI rate limits
    }
    search_results = GoogleSearch(search_params).get_dict()
    try:
        search_results = search_results["organic_results"]
    except KeyError:
        search_results = []
    search_results = simplify_search_results(search_results)
    print("\033[90m\033[3m" + "Completed search. Now scraping results.\n" + "\033[0m")
    results = ""
    # Scrape every result and accumulate the extracted notes.
    for result in search_results:
        url = result.get('link')
        print("\033[90m\033[3m" + "Scraping: "+url+"" + "...\033[0m")
        content = web_scrape_tool(url, task)
        print("\033[90m\033[3m" +str(content[0:100])[0:100]+"...\n" + "\033[0m")
        results += str(content)+". "

    return results
112
+
113
+
114
def simplify_search_results(search_results):
    """Keep only the position/title/link/snippet fields of each SerpAPI result."""
    wanted = ("position", "title", "link", "snippet")
    return [{key: item.get(key) for key in wanted} for item in search_results]
125
+
126
+
127
def web_scrape_tool(url: str, task:str):
    """Fetch *url*, extract its visible text, and distill the parts relevant
    to OBJECTIVE and the current task. Returns None when the fetch failed.

    Fixes: fetch_url_content signals failure with a falsy value, so test
    truthiness instead of ``is None`` (which could never fire when "" was
    returned); the unused extract_links computation was dropped.
    """
    content = fetch_url_content(url)
    if not content:
        return None

    text = extract_text(content)
    print("\033[90m\033[3m"+"Scrape completed. Length:" +str(len(text))+".Now extracting relevant info..."+"...\033[0m")
    # Only the first 5000 chars are distilled to bound token usage.
    return extract_relevant_info(OBJECTIVE, text[0:5000], task)
141
+
142
# Browser-like User-Agent so sites don't reject the scraper outright.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
}

def fetch_url_content(url: str):
    """GET *url* with a browser User-Agent; return the body bytes, or None on failure.

    Fix: returning None (instead of "") on failure matches the ``is None``
    failure check in web_scrape_tool.
    """
    try:
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.content
    except requests.exceptions.RequestException as e:
        print(f"Error while fetching the URL: {e}")
        return None
154
+
155
def extract_links(content: str):
    """Return every absolute http(s) href referenced by an <a> tag in the page."""
    soup = BeautifulSoup(content, "html.parser")
    anchors = soup.findAll('a', attrs={'href': re.compile("^https?://")})
    return [anchor.get('href') for anchor in anchors]

def extract_text(content: str):
    """Return the page's visible text with surrounding whitespace stripped."""
    return BeautifulSoup(content, "html.parser").get_text(strip=True)
164
+
165
+
166
+
167
def extract_relevant_info(objective, large_string, task):
    """Walk *large_string* in overlapping chunks, asking gpt-3.5-turbo to fold
    objective/task-relevant facts from each chunk into a running notes string."""
    chunk_size = 3000
    overlap = 500
    notes = ""

    step = chunk_size - overlap
    for start in range(0, len(large_string), step):
        chunk = large_string[start:start + chunk_size]

        messages = [
            {"role": "system", "content": f"Objective: {objective}\nCurrent Task:{task}"},
            {"role": "user", "content": f"Analyze the following text and extract information relevant to our objective and current task, and only information relevant to our objective and current task. If there is no relevant information do not say that there is no relevant informaiton related to our objective. ### Then, update or start our notes provided here (keep blank if currently blank): {notes}.### Text to analyze: {chunk}.### Updated Notes:"},
        ]

        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=800,
            n=1,
            stop="###",
            temperature=0.7,
        )

        notes += response.choices[0].message['content'].strip() + ". "

    return notes
192
+
193
### Agent functions ##############################


def execute_task(task, task_list, OBJECTIVE):
    """Execute one task with its configured tool, mark it complete, and
    append its output to the global session summary.

    Fixes vs. the previous version:
    - web_scrape_tool requires (url, task) but was called with a single
      argument, so every web-scrape task raised TypeError;
    - an unrecognized tool left ``task_output`` unbound; it now falls back
      to an explanatory string;
    - the dependency-completeness flag that was computed but never used has
      been removed (tasks are created in chronological id order, so
      dependencies complete first).
    """
    global task_id_counter

    # Announce the task being executed.
    print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
    print(str(task['id'])+": "+str(task['task'])+" ["+str(task['tool']+"]"))

    # Build the prompt, folding in (truncated) dependency outputs.
    task_prompt = f"Complete your assigned task based on the objective and only based on information provided in the dependent task output, if provided. Your objective: {OBJECTIVE}. Your task: {task['task']}"
    if task["dependent_task_ids"]:
        dependent_tasks_output = ""
        for dep_id in task["dependent_task_ids"]:
            dependent_task_output = get_task_by_id(dep_id)["output"]
            dependent_task_output = dependent_task_output[0:2000]
            dependent_tasks_output += f" {dependent_task_output}"
        task_prompt += f" Your dependent tasks output: {dependent_tasks_output}\n OUTPUT:"

    # Use tool to complete the task.
    if task["tool"] == "text-completion":
        task_output = text_completion_tool(task_prompt)
    elif task["tool"] == "web-search":
        task_output = web_search_tool(str(task['task']))
    elif task["tool"] == "web-scrape":
        # web_scrape_tool takes (url, task-context); the second argument was
        # previously missing, raising TypeError.
        task_output = web_scrape_tool(str(task['task']), task['task'])
    else:
        task_output = f"Unknown tool: {task['tool']}"

    # Mark task as complete and save output.
    task_index = next((i for i, t in enumerate(task_list) if t["id"] == task["id"]), None)
    task_list[task_index]["status"] = "complete"
    task_list[task_index]["output"] = task_output

    # Print task output.
    print("\033[93m\033[1m"+"\nTask Output:"+"\033[0m\033[0m")
    print(task_output)

    # Add task output to session_summary.
    global session_summary
    session_summary += f"\n\nTask {task['id']} - {task['task']}:\n{task_output}"
242
+
243
+
244
+
245
task_list = []

def task_creation_agent(objective: str) -> List[Dict]:
    """Ask GPT-4 to produce the initial JSON task list for *objective*.

    On a JSON parse failure the error is printed and the previous task_list
    (initially empty) is returned unchanged.

    Fixes: removed the dead ``minified_task_list`` computation; the prompt now
    uses the ``objective`` parameter instead of reaching for the global.
    """
    global task_list

    prompt = (
        f"You are a task creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
        f"Create new tasks based on the objective. Limit tasks types to those that can be completed with the available tools listed below. Task description should be detailed."
        f"Current tool option is [text-completion] {websearch_var} and only." # web-search is added automatically if SERPAPI exists
        f"For tasks using [web-search], provide the search query, and only the search query to use (eg. not 'research waterproof shoes, but 'waterproof shoes')"
        f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
        f"Make sure all task IDs are in chronological order.\n"
        f"The last step is always to provide a final summary report including tasks executed and summary of knowledge acquired.\n"
        f"Do not create any summarizing steps outside of the last step..\n"
        f"An example of the desired output format is: "
        "[{\"id\": 1, \"task\": \"https://untapped.vc\", \"tool\": \"web-scrape\", \"dependent_task_ids\": [], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 2, \"task\": \"Consider additional insights that can be reasoned from the results of...\", \"tool\": \"text-completion\", \"dependent_task_ids\": [1], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}, {\"id\": 3, \"task\": \"Untapped Capital\", \"tool\": \"web-search\", \"dependent_task_ids\": [], \"status\": \"incomplete\", \"result\": null, \"result_summary\": null}].\n"
        f"JSON TASK LIST="
    )

    print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
    print("\033[90m\033[3m" + "Analyzing objective...\n" + "\033[0m")
    print("\033[90m\033[3m" + "Running task creation agent...\n" + "\033[0m")
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": "You are a task creation AI."},
            {"role": "user", "content": prompt}
        ],
        temperature=0,
        max_tokens=1500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )

    # Extract the content of the assistant's response and parse it as JSON.
    result = response["choices"][0]["message"]["content"]
    print("\033[90m\033[3m" + "\nDone!\n" + "\033[0m")
    try:
        task_list = json.loads(result)
    except Exception as error:
        print(error)

    return task_list
296
+
297
##### START MAIN LOOP########

# Print OBJECTIVE
print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
print(OBJECTIVE)

# Initialize task_id_counter
task_id_counter = 1

# Run the task_creation_agent to create initial tasks
task_list = task_creation_agent(OBJECTIVE)
print_tasklist()

# Execute tasks in list order until none are left incomplete.
# Fix: the old `while len(task_list) > 0` loop never terminated once every
# task was complete (the inner for-loop simply found nothing to run).
while True:
    next_task = next((t for t in task_list if t["status"] == "incomplete"), None)
    if next_task is None:
        break
    execute_task(next_task, task_list, OBJECTIVE)
    print_tasklist()

# Print session summary
print("\033[96m\033[1m"+"\n*****SESSION SUMMARY*****\n"+"\033[0m\033[0m")
print(session_summary)
babyagi/classic/BabyDeerAGI.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ###### This is a modified version of OG BabyAGI, called BabyDeerAGI (modifications will follow the pattern "Baby<animal>AGI").######
2
+ ######IMPORTANT NOTE: I'm sharing this as a framework to build on top of (with lots of room for improvement), to facilitate discussion around how to improve these. This is NOT for people who are looking for a complete solution that's ready to use. ######
3
+
4
+ import openai
5
+ import time
6
+ from datetime import datetime
7
+ import requests
8
+ from bs4 import BeautifulSoup
9
+ from collections import deque
10
+ from typing import Dict, List
11
+ import re
12
+ import ast
13
+ import json
14
+ from serpapi import GoogleSearch
15
+ from concurrent.futures import ThreadPoolExecutor
16
+ import time
17
+
18
+ ### SET THESE 4 VARIABLES ##############################
19
+
20
+ # Add your API keys here
21
+ OPENAI_API_KEY = ""
22
+ SERPAPI_API_KEY = "" #[optional] web-search becomes available automatically when serpapi api key is provided
23
+
24
+ # Set variables
25
+ OBJECTIVE = "Research recent AI news and write a poem about your findings in the style of shakespeare."
26
+
27
+ #turn on user input (change to "True" to turn on user input tool)
28
+ user_input=False
29
+
30
+ ### UP TO HERE ##############################
31
+
32
+ # Configure OpenAI and SerpAPI client
33
+ openai.api_key = OPENAI_API_KEY
34
+ if SERPAPI_API_KEY:
35
+ serpapi_client = GoogleSearch({"api_key": SERPAPI_API_KEY})
36
+ websearch_var = "[web-search] "
37
+ else:
38
+ websearch_var = ""
39
+
40
+ if user_input == True:
41
+ user_input_var = "[user-input]"
42
+ else:
43
+ user_input_var = ""
44
+
45
+
46
+ # Initialize task list
47
+ task_list = []
48
+
49
+ # Initialize session_summary
50
+ session_summary = "OBJECTIVE: "+OBJECTIVE+"\n\n"
51
+
52
### Task list functions ##############################
def get_task_by_id(task_id: int):
    """Return the task dict with the given id, or None when no task matches."""
    matches = [t for t in task_list if t["id"] == task_id]
    return matches[0] if matches else None
58
+
59
# Print task list and session summary
def print_tasklist():
    """Build the colored task-list report as one string, then print it in a single call."""
    parts = ["\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"]
    for t in task_list:
        deps = ""
        if t['dependent_task_ids']:
            deps = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in t['dependent_task_ids']])}>\033[0m"
        colour = "\033[32m" if t['status'] == "complete" else "\033[31m"
        parts.append(f"\033[1m{t['id']}\033[0m: {t['task']} {colour}[{t['status']}]\033[0m \033[93m[{t['tool']}] {deps}\033[0m\n")
    print("".join(parts))
69
+
70
### Tool functions ##############################
def text_completion_tool(prompt: str):
    """Run *prompt* through gpt-3.5-turbo as one user message and return the stripped reply."""
    reply = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        temperature=0.2,
        max_tokens=1500,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
    )
    return reply.choices[0].message['content'].strip()
86
+
87
+
88
def user_input_tool(prompt: str):
    """Display *prompt* to the human operator and return their typed reply as a string."""
    return str(input(f"\n{prompt}\nYour response: "))
91
+
92
+
93
def web_search_tool(query: str , dependent_tasks_output : str):
    """Turn *query* into a Google search query (via the LLM), run it through
    SerpAPI, scrape each organic result, and rewrite the notes as one report.

    Fix: the bare ``except:`` around the organic-results lookup used to hide
    real failures (rate limits, network errors) as "no results"; only the
    missing-key case is caught now.

    NOTE(review): relies on the module-level ``task`` global for scrape
    context — confirm this is intentional before refactoring.
    """
    if dependent_tasks_output != "":
        dependent_task = f"Use the dependent task output below as reference to help craft the correct search query for the provided task above. Dependent task output:{dependent_tasks_output}."
    else:
        dependent_task = "."
    query = text_completion_tool("You are an AI assistant tasked with generating a Google search query based on the following task: "+query+". If the task looks like a search query, return the identical search query as your response. " + dependent_task + "\nSearch Query:")
    print("\033[90m\033[3m"+"Search query: " +str(query)+"\033[0m")
    search_params = {
        "engine": "google",
        "q": query,
        "api_key": SERPAPI_API_KEY,
        "num": 3  # edit this up or down for more results, though higher often results in OpenAI rate limits
    }
    search_results = GoogleSearch(search_params).get_dict()
    try:
        search_results = search_results["organic_results"]
    except KeyError:
        search_results = []
    search_results = simplify_search_results(search_results)
    print("\033[90m\033[3m" + "Completed search. Now scraping results.\n" + "\033[0m")
    results = ""
    # Scrape every result and accumulate the extracted notes.
    for result in search_results:
        url = result.get('link')
        print("\033[90m\033[3m" + "Scraping: "+url+"" + "...\033[0m")
        content = web_scrape_tool(url, task)
        print("\033[90m\033[3m" +str(content[0:100])[0:100]+"...\n" + "\033[0m")
        results += str(content)+". "

    results = text_completion_tool(f"You are an expert analyst. Rewrite the following information as one report without removing any facts.\n###INFORMATION:{results}.\n###REPORT:")
    return results
128
+
129
+
130
+ def simplify_search_results(search_results):
131
+ simplified_results = []
132
+ for result in search_results:
133
+ simplified_result = {
134
+ "position": result.get("position"),
135
+ "title": result.get("title"),
136
+ "link": result.get("link"),
137
+ "snippet": result.get("snippet")
138
+ }
139
+ simplified_results.append(simplified_result)
140
+ return simplified_results
141
+
142
+
143
+ def web_scrape_tool(url: str, task:str):
144
+ content = fetch_url_content(url)
145
+ if content is None:
146
+ return None
147
+
148
+ text = extract_text(content)
149
+ print("\033[90m\033[3m"+"Scrape completed. Length:" +str(len(text))+".Now extracting relevant info..."+"...\033[0m")
150
+ info = extract_relevant_info(OBJECTIVE, text[0:5000], task)
151
+ links = extract_links(content)
152
+
153
+ #result = f"{info} URLs: {', '.join(links)}"
154
+ result = info
155
+
156
+ return result
157
+
158
+ headers = {
159
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
160
+ }
161
+
162
+ def fetch_url_content(url: str):
163
+ try:
164
+ response = requests.get(url, headers=headers, timeout=10)
165
+ response.raise_for_status()
166
+ return response.content
167
+ except requests.exceptions.RequestException as e:
168
+ print(f"Error while fetching the URL: {e}")
169
+ return ""
170
+
171
+ def extract_links(content: str):
172
+ soup = BeautifulSoup(content, "html.parser")
173
+ links = [link.get('href') for link in soup.findAll('a', attrs={'href': re.compile("^https?://")})]
174
+ return links
175
+
176
+ def extract_text(content: str):
177
+ soup = BeautifulSoup(content, "html.parser")
178
+ text = soup.get_text(strip=True)
179
+ return text
180
+
181
+
182
+
183
+ def extract_relevant_info(objective, large_string, task):
184
+ chunk_size = 3000
185
+ overlap = 500
186
+ notes = ""
187
+
188
+ for i in range(0, len(large_string), chunk_size - overlap):
189
+ chunk = large_string[i:i + chunk_size]
190
+
191
+ messages = [
192
+ {"role": "system", "content": f"You are an AI assistant."},
193
+ {"role": "user", "content": f"You are an expert AI research assistant tasked with creating or updating the current notes. If the current note is empty, start a current-notes section by exracting relevant data to the task and objective from the chunk of text to analyze. If there is a current note, add new relevant info frol the chunk of text to analyze. Make sure the new or combined notes is comprehensive and well written. Here's the current chunk of text to analyze: {chunk}. ### Here is the current task: {task}.### For context, here is the objective: {OBJECTIVE}.### Here is the data we've extraced so far that you need to update: {notes}.### new-or-updated-note:"}
194
+ ]
195
+
196
+ response = openai.ChatCompletion.create(
197
+ model="gpt-3.5-turbo",
198
+ messages=messages,
199
+ max_tokens=800,
200
+ n=1,
201
+ stop="###",
202
+ temperature=0.7,
203
+ )
204
+
205
+ notes += response.choices[0].message['content'].strip()+". ";
206
+
207
+ return notes
208
+
209
+ ### Agent functions ##############################
210
+
211
+
212
+ def execute_task(task, task_list, OBJECTIVE):
213
+
214
+ global session_summary
215
+ global task_id_counter
216
+ # Check if dependent_task_ids is not empty
217
+ if task["dependent_task_ids"]:
218
+ all_dependent_tasks_complete = True
219
+ for dep_id in task["dependent_task_ids"]:
220
+ dependent_task = get_task_by_id(dep_id)
221
+ if not dependent_task or dependent_task["status"] != "complete":
222
+ all_dependent_tasks_complete = False
223
+ break
224
+
225
+
226
+ # Execute task
227
+ p_nexttask="\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
228
+ p_nexttask += str(task['id'])+": "+str(task['task'])+" ["+str(task['tool']+"]")
229
+ print(p_nexttask)
230
+ task_prompt = f"Complete your assigned task based on the objective and only based on information provided in the dependent task output, if provided. \n###\nYour objective: {OBJECTIVE}. \n###\nYour task: {task['task']}"
231
+ if task["dependent_task_ids"]:
232
+ dependent_tasks_output = ""
233
+ for dep_id in task["dependent_task_ids"]:
234
+ dependent_task_output = get_task_by_id(dep_id)["output"]
235
+ dependent_task_output = dependent_task_output[0:2000]
236
+ dependent_tasks_output += f" {dependent_task_output}"
237
+ task_prompt += f" \n###\ndependent tasks output: {dependent_tasks_output} \n###\nYour task: {task['task']}\n###\nRESPONSE:"
238
+ else:
239
+ dependent_tasks_output="."
240
+
241
+ # Use tool to complete the task
242
+ if task["tool"] == "text-completion":
243
+ task_output = text_completion_tool(task_prompt)
244
+ elif task["tool"] == "web-search":
245
+ task_output = web_search_tool(str(task['task']),str(dependent_tasks_output))
246
+ elif task["tool"] == "web-scrape":
247
+ task_output = web_scrape_tool(str(task['task']))
248
+ elif task["tool"] == "user-input":
249
+ task_output = user_input_tool(str(task['task']))
250
+
251
+
252
+
253
+ # Find task index in the task_list
254
+ task_index = next((i for i, t in enumerate(task_list) if t["id"] == task["id"]), None)
255
+
256
+ # Mark task as complete and save output
257
+ task_list[task_index]["status"] = "complete"
258
+ task_list[task_index]["output"] = task_output
259
+
260
+ # Print task output
261
+ print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
262
+ print(task_output)
263
+ # Add task output to session_summary
264
+ session_summary += f"\n\nTask {task['id']} - {task['task']}:\n{task_output}"
265
+
266
+ def task_ready_to_run(task, task_list):
267
+ return all([get_task_by_id(dep_id)["status"] == "complete" for dep_id in task["dependent_task_ids"]])
268
+
269
+
270
+ task_list = []
271
+
272
+ def task_creation_agent(objective: str) -> List[Dict]:
273
+ global task_list
274
+ minified_task_list = [{k: v for k, v in task.items() if k != "result"} for task in task_list]
275
+
276
+ prompt = (
277
+ f"You are an expert task creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {OBJECTIVE}. "
278
+ f"Create new tasks based on the objective. Limit tasks types to those that can be completed with the available tools listed below. Task description should be detailed."
279
+ f"Current tool options are [text-completion] {websearch_var} {user_input_var}." # web-search is added automatically if SERPAPI exists
280
+ f"For tasks using [web-search], provide the search query, and only the search query to use (eg. not 'research waterproof shoes, but 'waterproof shoes'). Result will be a summary of relevant information from the first few articles."
281
+ f"When requiring multiple searches, use the [web-search] multiple times. This tool will use the dependent task result to generate the search query if necessary."
282
+ f"Use [user-input] sparingly and only if you need to ask a question to the user who set up the objective. The task description should be the question you want to ask the user.')"
283
+ f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
284
+ f"Make sure all task IDs are in chronological order.\n"
285
+ f"EXAMPLE OBJECTIVE=Look up AI news from today (May 27, 2023) and write a poem."
286
+ "TASK LIST=[{\"id\":1,\"task\":\"AI news today\",\"tool\":\"web-search\",\"dependent_task_ids\":[],\"status\":\"incomplete\",\"result\":null,\"result_summary\":null},{\"id\":2,\"task\":\"Extract key points from AI news articles\",\"tool\":\"text-completion\",\"dependent_task_ids\":[1],\"status\":\"incomplete\",\"result\":null,\"result_summary\":null},{\"id\":3,\"task\":\"Generate a list of AI-related words and phrases\",\"tool\":\"text-completion\",\"dependent_task_ids\":[2],\"status\":\"incomplete\",\"result\":null,\"result_summary\":null},{\"id\":4,\"task\":\"Write a poem using AI-related words and phrases\",\"tool\":\"text-completion\",\"dependent_task_ids\":[3],\"status\":\"incomplete\",\"result\":null,\"result_summary\":null},{\"id\":5,\"task\":\"Final summary report\",\"tool\":\"text-completion\",\"dependent_task_ids\":[1,2,3,4],\"status\":\"incomplete\",\"result\":null,\"result_summary\":null}]"
287
+ f"OBJECTIVE={OBJECTIVE}"
288
+ f"TASK LIST="
289
+ )
290
+
291
+ print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
292
+ response = openai.ChatCompletion.create(
293
+ model="gpt-3.5-turbo",
294
+ messages=[
295
+ {
296
+ "role": "system",
297
+ "content": "You are a task creation AI."
298
+ },
299
+ {
300
+ "role": "user",
301
+ "content": prompt
302
+ }
303
+ ],
304
+ temperature=0,
305
+ max_tokens=1500,
306
+ top_p=1,
307
+ frequency_penalty=0,
308
+ presence_penalty=0
309
+ )
310
+
311
+ # Extract the content of the assistant's response and parse it as JSON
312
+ result = response["choices"][0]["message"]["content"]
313
+ try:
314
+ task_list = json.loads(result)
315
+ except Exception as error:
316
+ print(error)
317
+
318
+ return task_list
319
+
320
+ ##### START MAIN LOOP########
321
+
322
+ #Print OBJECTIVE
323
+ print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
324
+ print(OBJECTIVE)
325
+
326
+ # Initialize task_id_counter
327
+ task_id_counter = 1
328
+
329
+ # Run the task_creation_agent to create initial tasks
330
+ task_list = task_creation_agent(OBJECTIVE)
331
+ print_tasklist()
332
+
333
+ # Create a ThreadPoolExecutor
334
+ with ThreadPoolExecutor() as executor:
335
+ while True:
336
+ tasks_submitted = False
337
+ for task in task_list:
338
+ if task["status"] == "incomplete" and task_ready_to_run(task, task_list):
339
+ future = executor.submit(execute_task, task, task_list, OBJECTIVE)
340
+ task["status"] = "running"
341
+ tasks_submitted = True
342
+
343
+ if not tasks_submitted and all(task["status"] == "complete" for task in task_list):
344
+ break
345
+
346
+ time.sleep(5)
347
+
348
+ # Print session summary
349
+ print("\033[96m\033[1m"+"\n*****SAVING FILE...*****\n"+"\033[0m\033[0m")
350
+ file = open(f'output/output_{datetime.now().strftime("%d_%m_%Y_%H_%M_%S")}.txt', 'w')
351
+ file.write(session_summary)
352
+ file.close()
353
+ print("...file saved.")
354
+ print("END")
babyagi/classic/BabyElfAGI/main.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ import importlib.util
4
+ import json
5
+ import openai
6
+ import concurrent.futures
7
+ import time
8
+ from datetime import datetime
9
+ from skills.skill import Skill
10
+ from skills.skill_registry import SkillRegistry
11
+ from tasks.task_registry import TaskRegistry
12
+
13
+
14
+ load_dotenv() # Load environment variables from .env file
15
+
16
+ # Retrieve all API keys
17
+ api_keys = {
18
+ 'openai': os.environ['OPENAI_API_KEY'],
19
+ 'serpapi': os.environ['SERPAPI_API_KEY']
20
+ # Add more keys here as needed
21
+ }
22
+
23
+ # Set OBJECTIVE
24
+ OBJECTIVE = "Create an example objective and tasklist for 'write a poem', which only uses text_completion in the tasks. Do this by usign code_reader to read example1.json, then writing the JSON objective tasklist pair using text_completion, and saving it using objective_saver."
25
+ LOAD_SKILLS = ['text_completion','code_reader','objective_saver']
26
+ REFLECTION = False
27
+
28
+ ##### START MAIN LOOP########
29
+
30
+ # Print OBJECTIVE
31
+ print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
32
+ print(OBJECTIVE)
33
+
34
+ if __name__ == "__main__":
35
+ session_summary = ""
36
+
37
+ # Initialize the SkillRegistry and TaskRegistry
38
+ skill_registry = SkillRegistry(api_keys=api_keys, skill_names=LOAD_SKILLS)
39
+ skill_descriptions = ",".join(f"[{skill.name}: {skill.description}]" for skill in skill_registry.skills.values())
40
+ task_registry = TaskRegistry()
41
+
42
+ # Create the initial task list based on an objective
43
+ task_registry.create_tasklist(OBJECTIVE, skill_descriptions)
44
+
45
+ # Initialize task outputs
46
+ task_outputs = {i: {"completed": False, "output": None} for i, _ in enumerate(task_registry.get_tasks())}
47
+
48
+ # Create a thread pool for parallel execution
49
+ with concurrent.futures.ThreadPoolExecutor() as executor:
50
+ # Loop until all tasks are completed
51
+ while not all(task["completed"] for task in task_outputs.values()):
52
+
53
+ # Get the tasks that are ready to be executed (i.e., all their dependencies have been completed)
54
+ tasks = task_registry.get_tasks()
55
+ # Print the updated task list
56
+ task_registry.print_tasklist(tasks)
57
+
58
+ # Update task_outputs to include new tasks
59
+ for task in tasks:
60
+ if task["id"] not in task_outputs:
61
+ task_outputs[task["id"]] = {"completed": False, "output": None}
62
+
63
+
64
+ ready_tasks = [(task["id"], task) for task in tasks
65
+ if all((dep in task_outputs and task_outputs[dep]["completed"])
66
+ for dep in task.get('dependent_task_ids', []))
67
+ and not task_outputs[task["id"]]["completed"]]
68
+
69
+ session_summary += str(task)+"\n"
70
+ futures = [executor.submit(task_registry.execute_task, task_id, task, skill_registry, task_outputs, OBJECTIVE)
71
+ for task_id, task in ready_tasks if not task_outputs[task_id]["completed"]]
72
+
73
+ # Wait for the tasks to complete
74
+ for future in futures:
75
+ i, output = future.result()
76
+ task_outputs[i]["output"] = output
77
+ task_outputs[i]["completed"] = True
78
+
79
+ # Update the task in the TaskRegistry
80
+ task_registry.update_tasks({"id": i, "status": "completed", "result": output})
81
+
82
+ completed_task = task_registry.get_task(i)
83
+ print(f"\033[92mTask #{i}: {completed_task.get('task')} \033[0m\033[92m[COMPLETED]\033[0m\033[92m[{completed_task.get('skill')}]\033[0m")
84
+
85
+ # Reflect on the output
86
+ if output:
87
+ session_summary += str(output)+"\n"
88
+
89
+
90
+ if REFLECTION == True:
91
+ new_tasks, insert_after_ids, tasks_to_update = task_registry.reflect_on_output(output, skill_descriptions)
92
+ # Insert new tasks
93
+ for new_task, after_id in zip(new_tasks, insert_after_ids):
94
+ task_registry.add_task(new_task, after_id)
95
+
96
+ # Update existing tasks
97
+ for task_to_update in tasks_to_update:
98
+ task_registry.update_tasks(task_to_update)
99
+
100
+
101
+
102
+ #print(task_outputs.values())
103
+ if all(task["status"] == "completed" for task in task_registry.tasks):
104
+ print("All tasks completed!")
105
+ break
106
+
107
+ # Short delay to prevent busy looping
108
+ time.sleep(0.1)
109
+
110
+
111
+ # Print session summary
112
+ print("\033[96m\033[1m"+"\n*****SAVING FILE...*****\n"+"\033[0m\033[0m")
113
+ file = open(f'output/output_{datetime.now().strftime("%d_%m_%Y_%H_%M_%S")}.txt', 'w')
114
+ file.write(session_summary)
115
+ file.close()
116
+ print("...file saved.")
117
+ print("END")
118
+ executor.shutdown()
babyagi/classic/BabyElfAGI/skills/code_reader.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import openai
3
+ import os
4
+
5
+ class CodeReader(Skill):
6
+ name = 'code_reader'
7
+ description = "A skill that finds a file's location in it's own program's directory and returns its contents."
8
+ api_keys_required = ['openai']
9
+
10
+ def __init__(self, api_keys):
11
+ super().__init__(api_keys)
12
+
13
+ def execute(self, params, dependent_task_outputs, objective):
14
+ if not self.valid:
15
+ return
16
+
17
+ dir_structure = self.get_directory_structure(self.get_top_parent_path(os.path.realpath(__file__)))
18
+ print(f"Directory structure: {dir_structure}")
19
+ example_dir_structure = {'.': {'main.py': None}, 'skills': {'__init__.py': None, 'web_scrape.py': None, 'skill.py': None, 'test_skill.py': None, 'text_completion.py': None, 'web_search.py': None, 'skill_registry.py': None, 'directory_structure.py': None, 'code_reader.py': None}, 'tasks': {'task_registry.py': None}, 'output': {}}
20
+ example_params = "Analyze main.py"
21
+ example_response = "main.py"
22
+
23
+ task_prompt = f"Find a specific file in a directory and return only the file path, based on the task description below. Always return a directory.###The directory structure is as follows: \n{example_dir_structure}\nYour task: {example_params}\n###\nRESPONSE:{example_response} ###The directory structure is as follows: \n{dir_structure}\nYour task: {params}\n###\nRESPONSE:"
24
+
25
+ messages = [
26
+ {"role": "system", "content": "You are a helpful assistant."},
27
+ {"role": "user", "content": task_prompt}
28
+ ]
29
+ response = openai.ChatCompletion.create(
30
+ model="gpt-3.5-turbo",
31
+ messages=messages,
32
+ temperature=0.2,
33
+ max_tokens=1500,
34
+ top_p=1,
35
+ frequency_penalty=0,
36
+ presence_penalty=0
37
+ )
38
+ file_path = response.choices[0].message['content'].strip()
39
+ print(f"AI suggested file path: {file_path}")
40
+
41
+ try:
42
+ with open(file_path, 'r') as file:
43
+ file_content = file.read()
44
+ print(f"File content:\n{file_content}")
45
+ return file_content
46
+ except FileNotFoundError:
47
+ print("File not found. Please check the AI's suggested file path.")
48
+ return None
49
+
50
+ def get_directory_structure(self, start_path):
51
+ dir_structure = {}
52
+ ignore_dirs = ['.','__init__.py', '__pycache__', 'pydevd', 'poetry','venv'] # add any other directories to ignore here
53
+
54
+ for root, dirs, files in os.walk(start_path):
55
+ dirs[:] = [d for d in dirs if not any(d.startswith(i) for i in ignore_dirs)] # exclude specified directories
56
+ files = [f for f in files if not f[0] == '.' and f.endswith('.py')] # exclude hidden files and non-Python files
57
+
58
+ current_dict = dir_structure
59
+ path_parts = os.path.relpath(root, start_path).split(os.sep)
60
+ for part in path_parts:
61
+ if part: # skip empty parts
62
+ if part not in current_dict:
63
+ current_dict[part] = {}
64
+ current_dict = current_dict[part]
65
+ for f in files:
66
+ current_dict[f] = None
67
+
68
+ return dir_structure
69
+
70
+ def get_top_parent_path(self, current_path):
71
+ relative_path = ""
72
+ while True:
73
+ new_path = os.path.dirname(current_path)
74
+ if new_path == '/home/runner/BabyElfAGI/skills': # reached the top
75
+ return '/home/runner/BabyElfAGI'
76
+ current_path = new_path
77
+ relative_path = os.path.join("..", relative_path)
78
+
79
+ return relative_path
babyagi/classic/BabyElfAGI/skills/directory_structure.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import os
3
+
4
+ class DirectoryStructure(Skill):
5
+ name = 'directory_structure'
6
+ description = "A tool that outputs the file and folder structure of its top parent folder."
7
+
8
+ def __init__(self, api_keys=None):
9
+ super().__init__(api_keys)
10
+
11
+ def execute(self, params, dependent_task_outputs, objective):
12
+ # Get the current script path
13
+ current_script_path = os.path.realpath(__file__)
14
+
15
+ # Get the top parent directory of current script
16
+ top_parent_path = self.get_top_parent_path(current_script_path)
17
+ # Get the directory structure from the top parent directory
18
+ dir_structure = self.get_directory_structure(top_parent_path)
19
+
20
+ return dir_structure
21
+
22
+ def get_directory_structure(self, start_path):
23
+ dir_structure = {}
24
+ ignore_dirs = ['.','__init__.py', '__pycache__', 'pydevd', 'poetry','venv'] # add any other directories to ignore here
25
+
26
+ for root, dirs, files in os.walk(start_path):
27
+ dirs[:] = [d for d in dirs if not any(d.startswith(i) for i in ignore_dirs)] # exclude specified directories
28
+ files = [f for f in files if not f[0] == '.' and f.endswith('.py')] # exclude hidden files and non-Python files
29
+
30
+ current_dict = dir_structure
31
+ path_parts = os.path.relpath(root, start_path).split(os.sep)
32
+ for part in path_parts:
33
+ if part: # skip empty parts
34
+ if part not in current_dict:
35
+ current_dict[part] = {}
36
+ current_dict = current_dict[part]
37
+ for f in files:
38
+ current_dict[f] = None
39
+ #print("#############################")
40
+ #print(str(current_dict)[0:100])
41
+ return dir_structure
42
+
43
+
44
+
45
+ def get_top_parent_path(self, current_path):
46
+ relative_path = ""
47
+ while True:
48
+ new_path = os.path.dirname(current_path)
49
+ print(new_path)
50
+ if new_path == '/home/runner/BabyElfAGI/skills': # reached the top
51
+ #if new_path == current_path: # reached the top
52
+ #return relative_path
53
+ return '/home/runner/BabyElfAGI'
54
+ current_path = new_path
55
+ relative_path = os.path.join("..", relative_path)
56
+ print(relative_path)
babyagi/classic/BabyElfAGI/skills/objective_saver.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import os
3
+ import openai
4
+
5
+ class ObjectiveSaver(Skill):
6
+ name = 'objective_saver'
7
+ description = "A skill that saves a new example_objective based on the concepts from skill_saver.py"
8
+ api_keys_required = []
9
+
10
+ def __init__(self, api_keys):
11
+ super().__init__(api_keys)
12
+
13
+ def execute(self, params, dependent_task_outputs, objective):
14
+ if not self.valid:
15
+ return
16
+ #print(dependent_task_outputs[2])
17
+ code = dependent_task_outputs[2]
18
+ task_prompt = f"Come up with a file name (eg. 'research_shoes.json') for the following objective:{code}\n###\nFILE_NAME:"
19
+
20
+ messages = [
21
+ {"role": "user", "content": task_prompt}
22
+ ]
23
+ response = openai.ChatCompletion.create(
24
+ model="gpt-3.5-turbo",
25
+ messages=messages,
26
+ temperature=0.4,
27
+ max_tokens=3000,
28
+ top_p=1,
29
+ frequency_penalty=0,
30
+ presence_penalty=0
31
+ )
32
+
33
+ file_name = response.choices[0].message['content'].strip()
34
+ file_path = os.path.join('tasks/example_objectives',file_name)
35
+
36
+ try:
37
+ with open(file_path, 'w') as file:
38
+ file.write("["+code+"]")
39
+ print(f"Code saved successfully: {file_name}")
40
+ except:
41
+ print("Error saving code.")
42
+
43
+ return None
babyagi/classic/BabyElfAGI/skills/skill.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ class Skill:
2
+ name = 'base skill'
3
+ description = 'This is the base skill.'
4
+ api_keys_required = []
5
+
6
+ def __init__(self, api_keys):
7
+ self.api_keys = api_keys
8
+ missing_keys = self.check_required_keys(api_keys)
9
+ if missing_keys:
10
+ print(f"Missing API keys for {self.name}: {missing_keys}")
11
+ self.valid = False
12
+ else:
13
+ self.valid = True
14
+ for key in self.api_keys_required:
15
+ if isinstance(key, list):
16
+ for subkey in key:
17
+ if subkey in api_keys:
18
+ setattr(self, f"{subkey}_api_key", api_keys.get(subkey))
19
+ elif key in api_keys:
20
+ setattr(self, f"{key}_api_key", api_keys.get(key))
21
+
22
+ def check_required_keys(self, api_keys):
23
+ missing_keys = []
24
+ for key in self.api_keys_required:
25
+ if isinstance(key, list): # If the key is actually a list of alternatives
26
+ if not any(k in api_keys for k in key): # If none of the alternatives are present
27
+ missing_keys.append(key) # Add the list of alternatives to the missing keys
28
+ elif key not in api_keys: # If the key is a single key and it's not present
29
+ missing_keys.append(key) # Add the key to the missing keys
30
+ return missing_keys
31
+
32
+ def execute(self, params, dependent_task_outputs, objective):
33
+ raise NotImplementedError('Execute method must be implemented in subclass.')
babyagi/classic/BabyElfAGI/skills/skill_registry.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import importlib.util
3
+ import inspect
4
+ from .skill import Skill
5
+
6
+ class SkillRegistry:
7
+ def __init__(self, api_keys, skill_names=None):
8
+ self.skills = {}
9
+ skill_files = [f for f in os.listdir('skills') if f.endswith('.py') and f != 'skill.py']
10
+ for skill_file in skill_files:
11
+ module_name = skill_file[:-3]
12
+ if skill_names and module_name not in skill_names:
13
+ continue
14
+ module = importlib.import_module(f'skills.{module_name}')
15
+ for attr_name in dir(module):
16
+ attr_value = getattr(module, attr_name)
17
+ if inspect.isclass(attr_value) and issubclass(attr_value, Skill) and attr_value is not Skill:
18
+ skill = attr_value(api_keys)
19
+ if skill.valid:
20
+ self.skills[skill.name] = skill
21
+ # Print the names and descriptions of all loaded skills
22
+ skill_info = "\n".join([f"{skill_name}: {skill.description}" for skill_name, skill in self.skills.items()])
23
+ # print(skill_info)
24
+
25
+ def load_all_skills(self):
26
+ skills_dir = os.path.dirname(__file__)
27
+ for filename in os.listdir(skills_dir):
28
+ if filename.endswith(".py") and filename not in ["__init__.py", "skill.py", "skill_registry.py"]:
29
+ skill_name = filename[:-3] # Remove .py extension
30
+ self.load_skill(skill_name)
31
+
32
+ def load_specific_skills(self, skill_names):
33
+ for skill_name in skill_names:
34
+ self.load_skill(skill_name)
35
+
36
+ def load_skill(self, skill_name):
37
+ skills_dir = os.path.dirname(__file__)
38
+ filename = f"{skill_name}.py"
39
+ if os.path.isfile(os.path.join(skills_dir, filename)):
40
+ spec = importlib.util.spec_from_file_location(skill_name, os.path.join(skills_dir, filename))
41
+ module = importlib.util.module_from_spec(spec)
42
+ spec.loader.exec_module(module)
43
+ for item_name in dir(module):
44
+ item = getattr(module, item_name)
45
+ if isinstance(item, type) and issubclass(item, Skill) and item is not Skill:
46
+ skill_instance = item(self.api_keys)
47
+ self.skills[skill_instance.name] = skill_instance
48
+
49
+ def get_skill(self, skill_name):
50
+ skill = self.skills.get(skill_name)
51
+ if skill is None:
52
+ raise Exception(
53
+ f"Skill '{skill_name}' not found. Please make sure the skill is loaded and all required API keys are set.")
54
+ return skill
55
+
56
+ def get_all_skills(self):
57
+ return self.skills
babyagi/classic/BabyElfAGI/skills/skill_saver.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import os
3
+ import openai
4
+
5
+ class SkillSaver(Skill):
6
+ name = 'skill_saver'
7
+ description = "A skill that saves code written in a previous step into a file within the skills folder. Not for writing code."
8
+ api_keys_required = []
9
+
10
+ def __init__(self, api_keys):
11
+ super().__init__(api_keys)
12
+
13
+ def execute(self, params, dependent_task_outputs, objective):
14
+ if not self.valid:
15
+ return
16
+
17
+ task_prompt = f"Extract the code and only the code from the dependent task output here: {dependent_task_outputs} \n###\nCODE:"
18
+
19
+ messages = [
20
+ {"role": "user", "content": task_prompt}
21
+ ]
22
+ response = openai.ChatCompletion.create(
23
+ model="gpt-3.5-turbo",
24
+ messages=messages,
25
+ temperature=0.4,
26
+ max_tokens=3000,
27
+ top_p=1,
28
+ frequency_penalty=0,
29
+ presence_penalty=0
30
+ )
31
+
32
+ code = response.choices[0].message['content'].strip()
33
+ task_prompt = f"Come up with a file name (eg. 'get_weather.py') for the following skill:{code}\n###\nFILE_NAME:"
34
+
35
+ messages = [
36
+ {"role": "user", "content": task_prompt}
37
+ ]
38
+ response = openai.ChatCompletion.create(
39
+ model="gpt-3.5-turbo",
40
+ messages=messages,
41
+ temperature=0.4,
42
+ max_tokens=3000,
43
+ top_p=1,
44
+ frequency_penalty=0,
45
+ presence_penalty=0
46
+ )
47
+
48
+ file_name = response.choices[0].message['content'].strip()
49
+ file_path = os.path.join('skills',file_name)
50
+
51
+ try:
52
+ with open(file_path, 'w') as file:
53
+ file.write(code)
54
+ print(f"Code saved successfully: {file_name}")
55
+ except:
56
+ print("Error saving code.")
57
+
58
+ return None
babyagi/classic/BabyElfAGI/skills/text_completion.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import openai
3
+
4
+ class TextCompletion(Skill):
5
+ name = 'text_completion'
6
+ description = "A tool that uses OpenAI's text completion API to generate, summarize, and/or analyze text and code."
7
+ api_keys_required = ['openai']
8
+
9
+ def __init__(self, api_keys):
10
+ super().__init__(api_keys)
11
+
12
+ def execute(self, params, dependent_task_outputs, objective):
13
+ if not self.valid:
14
+ return
15
+
16
+ task_prompt = f"Complete your assigned task based on the objective and only based on information provided in the dependent task output, if provided. \n###\nYour objective: {objective}. \n###\nYour task: {params} \n###\nDependent tasks output: {dependent_task_outputs} \n###\nYour task: {params}\n###\nRESPONSE:"
17
+
18
+ messages = [
19
+ {"role": "user", "content": task_prompt}
20
+ ]
21
+ response = openai.ChatCompletion.create(
22
+ model="gpt-3.5-turbo",
23
+ messages=messages,
24
+ temperature=0.4,
25
+ max_tokens=2000,
26
+ top_p=1,
27
+ frequency_penalty=0,
28
+ presence_penalty=0
29
+ )
30
+
31
+ return "\n\n"+response.choices[0].message['content'].strip()
babyagi/classic/BabyElfAGI/skills/web_search.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
from skills.skill import Skill
from serpapi import GoogleSearch
import openai
from bs4 import BeautifulSoup
import requests
import re

# Browser-like User-Agent so sites are less likely to reject the scraper.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36"
}


class WebSearch(Skill):
    """Skill that turns a task into a Google query, scrapes the top results,
    and condenses the scraped text into a single report via the chat model."""

    name = 'web_search'
    description = 'A tool that performs web searches.'
    api_keys_required = [['openai'], ['serpapi']]

    def __init__(self, api_keys):
        super().__init__(api_keys)

    def execute(self, params, dependent_task_outputs, objective):
        """Run the full pipeline: craft query -> SerpAPI search -> scrape -> report.

        params: the task description (used to craft the query).
        dependent_task_outputs: prior task output used as query context ("" if none).
        objective: the overall objective, passed through to relevance extraction.
        Returns the model-written report string.
        """
        # Modify the query based on the dependent task output.
        if dependent_task_outputs != "":
            dependent_task = f"Use the dependent task output below as reference to help craft the correct search query for the provided task above. Dependent task output:{dependent_task_outputs}."
        else:
            dependent_task = "."
        query = self.text_completion_tool("You are an AI assistant tasked with generating a Google search query based on the following task: "+params+". If the task looks like a search query, return the identical search query as your response. " + dependent_task + "\nSearch Query:")
        print("\033[90m\033[3m"+"Search query: "+str(query)+"\033[0m")

        # Set the search parameters (top 3 organic results only).
        search_params = {
            "engine": "google",
            "q": query,
            "api_key": self.serpapi_api_key,
            "num": 3
        }
        # Perform the web search.
        search_results = GoogleSearch(search_params).get_dict()

        # Simplify the search results.
        search_results = self.simplify_search_results(search_results.get('organic_results', []))
        print("\033[90m\033[3mCompleted search. Now scraping results.\n\033[0m")

        # Store the results from web scraping; skip pages that could not be
        # scraped so "None"/error placeholders do not pollute the report prompt.
        results = ""
        for result in search_results:
            url = result.get('link')
            print("\033[90m\033[3m" + "Scraping: "+url+"" + "...\033[0m")
            content = self.web_scrape_tool({"url": url, "task": params, "objective": objective})
            if content:
                results += str(content) + ". "
            print("\033[90m\033[3m"+str(results)[0:100]+"...\033[0m")

        # Process the results and generate a report.
        results = self.text_completion_tool(f"You are an expert analyst combining the results of multiple web scrapes. Rewrite the following information as one cohesive report without removing any facts. Ignore any reports of not having info, unless all reports say so - in which case explain that the search did not work and suggest other web search queries to try. \n###INFORMATION:{results}.\n###REPORT:")
        return results

    def simplify_search_results(self, search_results):
        """Reduce raw SerpAPI organic results to position/title/link/snippet dicts."""
        simplified_results = []
        for result in search_results:
            simplified_result = {
                "position": result.get("position"),
                "title": result.get("title"),
                "link": result.get("link"),
                "snippet": result.get("snippet")
            }
            simplified_results.append(simplified_result)
        return simplified_results

    def text_completion_tool(self, prompt: str):
        """Single-turn chat completion used for query crafting and report writing."""
        messages = [
            {"role": "user", "content": prompt}
        ]
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k-0613",
            messages=messages,
            temperature=0.2,
            max_tokens=1500,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        return response.choices[0].message['content'].strip()

    def web_scrape_tool(self, params):
        """Fetch params['url'] and extract info relevant to the task/objective.

        Returns the extracted notes, or None when the page could not be fetched.
        """
        content = self.fetch_url_content(params['url'])
        # BUGFIX: fetch_url_content returns "" on failure, never None, so the
        # original `if content is None` check was dead. Test for falsy instead.
        if not content:
            return None

        text = self.extract_text(content)
        print("\033[90m\033[3m"+"Scrape completed. Length:" +str(len(text))+".Now extracting relevant info..."+"...\033[0m")
        # Only the first 11,000 characters are analyzed to bound token usage.
        info = self.extract_relevant_info(params['objective'], text[0:11000], params['task'])
        # Link extraction was computed but never used in the returned result;
        # left here (commented) in case URLs should be re-added to the output.
        # links = self.extract_links(content)
        # result = f"{info} URLs: {', '.join(links)}"
        return info

    def fetch_url_content(self, url: str):
        """Return the raw page bytes, or "" if the request fails or errors."""
        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.raise_for_status()
            return response.content
        except requests.exceptions.RequestException as e:
            print(f"Error while fetching the URL: {e}")
            return ""

    def extract_links(self, content: str):
        """Return all absolute http(s) hrefs found in the page."""
        soup = BeautifulSoup(content, "html.parser")
        links = [link.get('href') for link in soup.findAll('a', attrs={'href': re.compile("^https?://")})]
        return links

    def extract_text(self, content: str):
        """Return the page's visible text with surrounding whitespace stripped."""
        soup = BeautifulSoup(content, "html.parser")
        text = soup.get_text(strip=True)
        return text

    def extract_relevant_info(self, objective, large_string, task):
        """Walk the scraped text in overlapping chunks, accumulating model notes.

        Overlap between consecutive chunks avoids cutting facts at boundaries.
        """
        chunk_size = 12000
        overlap = 500
        notes = ""

        if len(large_string) == 0:
            print("error scraping")
            return "Error scraping."

        for i in range(0, len(large_string), chunk_size - overlap):
            print("\033[90m\033[3m"+"Reading chunk..."+"\033[0m")
            chunk = large_string[i:i + chunk_size]

            messages = [
                {"role": "system", "content": f"You are an AI assistant."},
                {"role": "user", "content": f"You are an expert AI research assistant tasked with creating or updating the current notes. If the current note is empty, start a current-notes section by exracting relevant data to the task and objective from the chunk of text to analyze. If there is a current note, add new relevant info frol the chunk of text to analyze. Make sure the new or combined notes is comprehensive and well written. Here's the current chunk of text to analyze: {chunk}. ### Here is the current task: {task}.### For context, here is the objective: {objective}.### Here is the data we've extraced so far that you need to update: {notes}.### new-or-updated-note:"}
            ]

            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo-16k-0613",
                messages=messages,
                max_tokens=800,
                n=1,
                stop="###",
                temperature=0.7,
            )

            notes += response.choices[0].message['content'].strip() + ". "

        return notes
babyagi/classic/BabyElfAGI/tasks/example_objectives/example1.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Create a new skill that writes a poem based on an input.",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Read the code in text_completion.py using the code_reader skill to understand its structure and concepts.",
7
+ "skill": "code_reader",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Write a new skill that uses the concepts from text_completion.py to generate a poem based on user input.",
14
+ "skill": "text_completion",
15
+ "dependent_task_ids": [1],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Save the newly created skill using the skill_saver skill for future use.",
21
+ "skill": "skill_saver",
22
+ "dependent_task_ids": [2],
23
+ "status": "incomplete"
24
+ }
25
+ ]
26
+ }]
babyagi/classic/BabyElfAGI/tasks/example_objectives/example2.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Create a new skill that looks up the weather based on a location input.",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Search code examples for free weather APIs to gather information on how to retrieve weather data based on a location input.",
7
+ "skill": "web_search",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Read the code in text_completion.py using the code_reader skill to understand its structure and concepts.",
14
+ "skill": "code_reader",
15
+ "dependent_task_ids": [],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Write a new skill that combines the concepts from text_completion.py and the code examples for free weather APIs to implement weather lookup based on a location input.",
21
+ "skill": "text_completion",
22
+ "dependent_task_ids": [2, 1],
23
+ "status": "incomplete"
24
+ },
25
+ {
26
+ "id": 4,
27
+ "task": "Save the newly created skill using the skill_saver skill for future use.",
28
+ "skill": "skill_saver",
29
+ "dependent_task_ids": [3],
30
+ "status": "incomplete"
31
+ }
32
+ ]
33
+ }]
babyagi/classic/BabyElfAGI/tasks/example_objectives/example3.json ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Research untapped.vc",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Conduct a web search on 'untapped.vc' to gather information about the company, its investments, and its impact in the startup ecosystem.",
7
+ "skill": "web_search",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Based on the results from the first web search, perform a follow-up web search to explore specific areas of interest or investment strategies of 'untapped.vc'.",
14
+ "skill": "web_search",
15
+ "dependent_task_ids": [1],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Use text_completion to summarize the findings from the initial web search on 'untapped.vc' and provide key insights.",
21
+ "skill": "text_completion",
22
+ "dependent_task_ids": [1],
23
+ "status": "incomplete"
24
+ },
25
+ {
26
+ "id": 4,
27
+ "task": "Use text_completion to summarize the findings from the follow-up web search and highlight any additional information or insights.",
28
+ "skill": "text_completion",
29
+ "dependent_task_ids": [2],
30
+ "status": "incomplete"
31
+ },
32
+ {
33
+ "id": 5,
34
+ "task": "Combine the summaries from the initial and follow-up web searches to provide a comprehensive overview of 'untapped.vc' and its activities.",
35
+ "skill": "text_completion",
36
+ "dependent_task_ids": [3, 4],
37
+ "status": "incomplete"
38
+ }
39
+ ]
40
+ }]
babyagi/classic/BabyElfAGI/tasks/example_objectives/example4.json ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Research Yohei Nakajima and Untapped Capital.",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Conduct a web search on Yohei Nakajima.",
7
+ "skill": "web_search",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Conduct a follow-up web search on Yohei Nakajima.",
14
+ "skill": "web_search",
15
+ "dependent_task_ids": [1],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Conduct a web search on Untapped Capital",
21
+ "skill": "web_search",
22
+ "dependent_task_ids": [],
23
+ "status": "incomplete"
24
+ },
25
+ {
26
+ "id": 4,
27
+ "task": "Conduct a follow-up web search on Untapped Capital",
28
+ "skill": "web_search",
29
+ "dependent_task_ids": [3],
30
+ "status": "incomplete"
31
+ },
32
+ {
33
+ "id": 5,
34
+ "task": "Analyze the findings from the web search on Yohei Nakajima and summarize his key contributions and areas of expertise.",
35
+ "skill": "text_completion",
36
+ "dependent_task_ids": [1,2],
37
+ "status": "incomplete"
38
+ },
39
+ {
40
+ "id": 6,
41
+ "task": "Analyze the findings from the web search on Untapped Capital and summarize their investment strategies and notable portfolio companies.",
42
+ "skill": "text_completion",
43
+ "dependent_task_ids": [3,4],
44
+ "status": "incomplete"
45
+ },
46
+ {
47
+ "id": 7,
48
+ "task": "Combine the analyses of Yohei Nakajima and Untapped Capital to provide an overview of their collaboration or mutual interests.",
49
+ "skill": "text_completion",
50
+ "dependent_task_ids": [5, 6],
51
+ "status": "incomplete"
52
+ }
53
+ ]
54
+ }]
babyagi/classic/BabyElfAGI/tasks/example_objectives/example5.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Based on skill_saver.py, write a new skill called objective_saver.py which saves a new example_objective.",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Look up the code in skill_saver.py using the code_reader skill to understand its structure and concepts.",
7
+ "skill": "code_reader",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Write a new skill called objective_saver.py that saves a new example_objective based on the concepts from skill_saver.py (use text_completion).",
14
+ "skill": "text_completion",
15
+ "dependent_task_ids": [1],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Save the newly created example_objective using the skill_saver.py skill for future use.",
21
+ "skill": "skill_saver",
22
+ "dependent_task_ids": [2],
23
+ "status": "incomplete"
24
+ }
25
+ ]
26
+ }]
babyagi/classic/BabyElfAGI/tasks/example_objectives/example6.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [{
2
+ "objective": "Create new example objective for researching waterproof kid shoes.",
3
+ "examples": [
4
+ {
5
+ "id": 1,
6
+ "task": "Read the contents of example1.json in the example_objectives folder using the code_reader skill.",
7
+ "skill": "code_reader",
8
+ "dependent_task_ids": [],
9
+ "status": "incomplete"
10
+ },
11
+ {
12
+ "id": 2,
13
+ "task": "Use text_completion to generate a new example objective and task list as JSON based on the extracted information from example1.json.",
14
+ "skill": "text_completion",
15
+ "dependent_task_ids": [1],
16
+ "status": "incomplete"
17
+ },
18
+ {
19
+ "id": 3,
20
+ "task": "Save the newly created example objective and task list as JSON using the objective_saver skill.",
21
+ "skill": "objective_saver",
22
+ "dependent_task_ids": [2],
23
+ "status": "incomplete"
24
+ }
25
+ ]
26
+ }]
babyagi/classic/BabyElfAGI/tasks/task_registry.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import openai
2
+ import json
3
+ import threading
4
+ import os
5
+ import numpy as np
6
+
7
class TaskRegistry:
    """Holds the session's task list: creates tasks with the chat model,
    dispatches them to skills, and reflects on outputs to extend the list."""

    def __init__(self):
        self.tasks = []
        # Lock guards self.tasks while reflect_on_output runs (may be called
        # from worker threads).
        self.lock = threading.Lock()
        objectives_file_path = "tasks/example_objectives"
        self.example_loader = ExampleObjectivesLoader(objectives_file_path)

    def load_example_objectives(self, user_objective):
        """Return the (objective, tasklist) example most similar to the user objective."""
        return self.example_loader.load_example_objectives(user_objective)

    def create_tasklist(self, objective, skill_descriptions):
        """Populate self.tasks with a JSON task list generated for `objective`,
        few-shot primed with the most relevant stored example."""
        # Load most relevant objective and tasklist from the example objectives.
        example_objective, example_tasklist = self.load_example_objectives(objective)

        prompt = (
            f"You are an expert task list creation AI tasked with creating a list of tasks as a JSON array, considering the ultimate objective of your team: {objective}. "
            f"Create a very short task list based on the objective, the final output of the last task will be provided back to the user. Limit tasks types to those that can be completed with the available skills listed below. Task description should be detailed.###"
            f"AVAILABLE SKILLS: {skill_descriptions}.###"
            f"RULES:"
            f"Do not use skills that are not listed."
            f"Always include one skill."
            f"dependent_task_ids should always be an empty array, or an array of numbers representing the task ID it should pull results from."
            f"Make sure all task IDs are in chronological order.###\n"
            f"EXAMPLE OBJECTIVE={json.dumps(example_objective)}"
            f"TASK LIST={json.dumps(example_tasklist)}"
            f"OBJECTIVE={objective}"
            f"TASK LIST="
        )

        print("\033[90m\033[3m" + "\nInitializing...\n" + "\033[0m")
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-0613",
            messages=[
                {
                    "role": "system",
                    "content": "You are a task creation AI."
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ],
            temperature=0,
            max_tokens=1500,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )

        # Extract the content of the assistant's response and parse it as JSON.
        result = response["choices"][0]["message"]["content"]
        try:
            task_list = json.loads(result)
            self.tasks = task_list
        except Exception as error:
            # Parse failure leaves self.tasks unchanged; error goes to console.
            print(error)

    def execute_task(self, i, task, skill_registry, task_outputs, objective):
        """Run `task` through its registered skill and return (i, task_output)."""
        p_nexttask = "\033[92m\033[1m"+"\n*****NEXT TASK ID:"+str(task['id'])+"*****\n"+"\033[0m\033[0m"
        # BUGFIX: the original line contained malformed ANSI escape sequences
        # ("\033[ EExecuting ... \033[)") and a stray paren; rewritten as plain text.
        p_nexttask += f"Executing task {task.get('id')}: {task.get('task')} [{task.get('skill')}]"
        print(p_nexttask)
        # Retrieve the skill from the registry.
        skill = skill_registry.get_skill(task['skill'])
        # Gather the outputs of the dependent tasks (empty when none declared).
        dependent_task_outputs = {dep: task_outputs[dep]["output"] for dep in task['dependent_task_ids']} if 'dependent_task_ids' in task else {}
        # Execute the skill.
        task_output = skill.execute(task['task'], dependent_task_outputs, objective)
        print("\033[93m\033[1m"+"\nTask Output (ID:"+str(task['id'])+"):"+"\033[0m\033[0m")
        print("TASK: "+str(task["task"]))
        print("OUTPUT: "+str(task_output))
        return i, task_output

    def reorder_tasks(self):
        """Keep self.tasks sorted by ascending task id."""
        self.tasks = sorted(self.tasks, key=lambda task: task['id'])

    def add_task(self, task, after_task_id):
        """Insert `task` directly after `after_task_id` (append when the id is
        unknown), then re-sort by id."""
        task_ids = [t["id"] for t in self.tasks]
        insert_index = task_ids.index(after_task_id) + 1 if after_task_id in task_ids else len(task_ids)
        self.tasks.insert(insert_index, task)
        self.reorder_tasks()

    def update_tasks(self, task_update):
        """Merge `task_update` into the task sharing its id."""
        for task in self.tasks:
            if task['id'] == task_update['id']:
                # Merge the update, overwriting only the fields it provides.
                task.update(task_update)
        self.reorder_tasks()

    def reflect_on_output(self, task_output, skill_descriptions):
        """Ask the model to propose new/updated tasks based on the last output.

        Returns a (new_tasks, insert_after_ids, tasks_to_update) triple;
        falls back to ([], [], []) when the model response cannot be parsed.
        """
        with self.lock:
            example = [
                [
                    {"id": 3, "task": "New task 1 description", "skill": "text_completion_skill",
                     "dependent_task_ids": [], "status": "complete"},
                    {"id": 4, "task": "New task 2 description", "skill": "text_completion_skill",
                     "dependent_task_ids": [], "status": "incomplete"}
                ],
                [2, 3],
                {"id": 5, "task": "Complete the objective and provide a final report",
                 "skill": "text_completion_skill", "dependent_task_ids": [1, 2, 3, 4], "status": "incomplete"}
            ]

            prompt = (
                f"You are an expert task manager, review the task output to decide at least one new task to add."
                f"As you add a new task, see if there are any tasks that need to be updated (such as updating dependencies)."
                f"Use the current task list as reference."
                f"Do not add duplicate tasks to those in the current task list."
                f"Only provide JSON as your response without further comments."
                f"Every new and updated task must include all variables, even they are empty array."
                f"Dependent IDs must be smaller than the ID of the task."
                f"New tasks IDs should be no larger than the last task ID."
                f"Always select at least one skill."
                f"Task IDs should be unique and in chronological order." f"Do not change the status of complete tasks."
                f"Only add skills from the AVAILABLE SKILLS, using the exact same spelling."
                f"Provide your array as a JSON array with double quotes. The first object is new tasks to add as a JSON array, the second array lists the ID numbers where the new tasks should be added after (number of ID numbers matches array), and the third object provides the tasks that need to be updated."
                f"Make sure to keep dependent_task_ids key, even if an empty array."
                f"AVAILABLE SKILLS: {skill_descriptions}.###"
                f"\n###Here is the last task output: {task_output}"
                f"\n###Here is the current task list: {self.tasks}"
                f"\n###EXAMPLE OUTPUT FORMAT = {json.dumps(example)}"
                f"\n###OUTPUT = "
            )
            print("\033[90m\033[3m" + "\nReflecting on task output to generate new tasks if necessary...\n" + "\033[0m")
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo-16k-0613",
                messages=[
                    {
                        "role": "system",
                        "content": "You are a task creation AI."
                    },
                    {
                        "role": "user",
                        "content": prompt
                    }
                ],
                temperature=0.7,
                max_tokens=1500,
                top_p=1,
                frequency_penalty=0,
                presence_penalty=0
            )

            # Extract the content of the assistant's response and parse it as JSON.
            result = response["choices"][0]["message"]["content"]
            print("\n#" + str(result))

            # The API returns a string here; the guard is kept for safety.
            if isinstance(result, str):
                try:
                    task_list = json.loads(result)
                    print(task_list)
                    return task_list[0], task_list[1], task_list[2]
                except Exception as error:
                    print(error)
                # BUGFIX: the original fell through and implicitly returned
                # None on parse failure, which broke tuple-unpacking callers.
                return [], [], []
            else:
                raise ValueError("Invalid task list structure in the output")

    def get_tasks(self):
        """
        Returns the current list of tasks.

        Returns:
            list: the list of tasks.
        """
        return self.tasks

    def get_task(self, task_id):
        """
        Returns a task given its task_id.

        Parameters:
            task_id : int
                The unique ID of the task.

        Returns:
            dict
                The task that matches the task_id, or None if absent.
        """
        matching_tasks = [task for task in self.tasks if task["id"] == task_id]

        if matching_tasks:
            return matching_tasks[0]
        else:
            print(f"No task found with id {task_id}")
            return None

    def print_tasklist(self, task_list):
        """Pretty-print `task_list` with colors for status, skill, and dependencies."""
        p_tasklist = "\033[95m\033[1m" + "\n*****TASK LIST*****\n" + "\033[0m"
        for t in task_list:
            dependent_task_ids = t.get('dependent_task_ids', [])
            dependent_task = ""
            if dependent_task_ids:
                dependent_task = f"\033[31m<dependencies: {', '.join([f'#{dep_id}' for dep_id in dependent_task_ids])}>\033[0m"
            # BUGFIX: statuses in this codebase are "complete"/"incomplete";
            # the original compared against "completed", so green never showed.
            status_color = "\033[32m" if t.get('status') == "complete" else "\033[31m"
            p_tasklist += f"\033[1m{t.get('id')}\033[0m: {t.get('task')} {status_color}[{t.get('status')}]\033[0m \033[93m[{t.get('skill')}] {dependent_task}\033[0m\n"
        print(p_tasklist)
219
+
220
+
221
+
222
class ExampleObjectivesLoader:
    """Loads example objectives from a folder of JSON files and selects the
    one most similar to a user objective via embedding cosine similarity."""

    def __init__(self, objectives_folder_path):
        self.objectives_folder_path = objectives_folder_path
        # Filled lazily by load_objectives_examples().
        self.objectives_examples = []

    def load_objectives_examples(self):
        """(Re)load every example objective file from the objectives folder."""
        self.objectives_examples = []
        for entry in os.listdir(self.objectives_folder_path):
            full_path = os.path.join(self.objectives_folder_path, entry)
            with open(full_path, 'r') as handle:
                self.objectives_examples.extend(json.load(handle))

    def find_most_relevant_objective(self, user_input):
        """Return (objective, examples) of the stored example closest to `user_input`."""
        target_embedding = self.get_embedding(user_input, model='text-embedding-ada-002')
        best_match = max(
            self.objectives_examples,
            key=lambda entry: self.cosine_similarity(entry['objective'], target_embedding)
        )
        return best_match['objective'], best_match['examples']

    def get_embedding(self, text, model='text-embedding-ada-002'):
        """Embed `text` with the given OpenAI embedding model."""
        response = openai.Embedding.create(input=[text], model=model)
        return response['data'][0]['embedding']

    def cosine_similarity(self, objective, embedding):
        """Embed `objective` and return its cosine similarity to `embedding`."""
        objective_embedding = self.get_embedding(objective, model='text-embedding-ada-002')
        return self.calculate_similarity(objective_embedding, embedding)

    def calculate_similarity(self, embedding1, embedding2):
        """Cosine similarity between two embedding vectors."""
        vec_a = np.array(embedding1, dtype=np.float32)
        vec_b = np.array(embedding2, dtype=np.float32)
        return np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b))

    def load_example_objectives(self, user_objective):
        """Reload examples from disk and return the (objective, tasklist)
        most relevant to `user_objective`."""
        self.load_objectives_examples()
        return self.find_most_relevant_objective(user_objective)
269
+
babyagi/classic/README.md ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # babyagi
2
+
3
+
4
+ # Objective
5
+ This Python script is an example of an AI-powered task management system. The system uses OpenAI and Pinecone APIs to create, prioritize, and execute tasks. The main idea behind this system is that it creates tasks based on the result of previous tasks and a predefined objective. The script then uses OpenAI's natural language processing (NLP) capabilities to create new tasks based on the objective, and Pinecone to store and retrieve task results for context. This is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023).
6
+
7
+ This README will cover the following:
8
+
9
+ * How the script works
10
+
11
+ * How to use the script
12
+ * Warning about running the script continuously
13
+ # How It Works
14
+ The script works by running an infinite loop that does the following steps:
15
+
16
+ 1. Pulls the first task from the task list.
17
+ 2. Sends the task to the execution agent, which uses OpenAI's API to complete the task based on the context.
18
+ 3. Enriches the result and stores it in Pinecone.
19
+ 4. Creates new tasks and reprioritizes the task list based on the objective and the result of the previous task.
20
+ The execution_agent() function is where the OpenAI API is used. It takes two parameters: the objective and the task. It then sends a prompt to OpenAI's API, which returns the result of the task. The prompt consists of a description of the AI system's task, the objective, and the task itself. The result is then returned as a string.
21
+
22
+ The task_creation_agent() function is where OpenAI's API is used to create new tasks based on the objective and the result of the previous task. The function takes four parameters: the objective, the result of the previous task, the task description, and the current task list. It then sends a prompt to OpenAI's API, which returns a list of new tasks as strings. The function then returns the new tasks as a list of dictionaries, where each dictionary contains the name of the task.
23
+
24
+ The prioritization_agent() function is where OpenAI's API is used to reprioritize the task list. The function takes one parameter, the ID of the current task. It sends a prompt to OpenAI's API, which returns the reprioritized task list as a numbered list.
25
+
26
+ Finally, the script uses Pinecone to store and retrieve task results for context. The script creates a Pinecone index based on the table name specified in YOUR_TABLE_NAME variable. Pinecone is then used to store the results of the task in the index, along with the task name and any additional metadata.
27
+
28
+ # How to Use
29
+ To use the script, you will need to follow these steps:
30
+
31
+ 1. Install the required packages: `pip install -r requirements.txt`
32
+ 2. Set your OpenAI and Pinecone API keys in the OPENAI_API_KEY and PINECONE_API_KEY variables.
33
+ 3. Set the Pinecone environment in the PINECONE_ENVIRONMENT variable.
34
+ 4. Set the name of the table where the task results will be stored in the YOUR_TABLE_NAME variable.
35
+ 5. Set the objective of the task management system in the OBJECTIVE variable.
36
+ 6. Set the first task of the system in the YOUR_FIRST_TASK variable.
37
+ 7. Run the script.
38
+
39
+ # Warning
40
+ This script is designed to be run continuously as part of a task management system. Running this script continuously can result in high API usage, so please use it responsibly. Additionally, the script requires the OpenAI and Pinecone APIs to be set up correctly, so make sure you have set up the APIs before running the script.
41
+
42
+ # Backstory
43
+ BabyAGI is a pared-down version of the original [Task-Driven Autonomous Agent](https://twitter.com/yoheinakajima/status/1640934493489070080?s=20) (Mar 28, 2023) shared on Twitter. This version is down to 140 lines: 13 comments, 22 blank, 105 code. The name of the repo came up in the reaction to the original autonomous agent - the author does not mean to imply that this is AGI.
44
+
45
+ Made with love by [@yoheinakajima](https://twitter.com/yoheinakajima), who happens to be a VC - so if you use this build a startup, ping him!
babyagi/classic/babyagi.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import openai
import pinecone
import time
from collections import deque
from typing import Dict, List

# Set API Keys
OPENAI_API_KEY = ""
PINECONE_API_KEY = ""
PINECONE_ENVIRONMENT = "us-east1-gcp"  # Pinecone Environment (eg. "us-east1-gcp")

# Set Variables
YOUR_TABLE_NAME = "test-table"
OBJECTIVE = "Solve world hunger."
YOUR_FIRST_TASK = "Develop a task list."

# Print OBJECTIVE
print("\033[96m\033[1m"+"\n*****OBJECTIVE*****\n"+"\033[0m\033[0m")
print(OBJECTIVE)

# Configure OpenAI and Pinecone
openai.api_key = OPENAI_API_KEY
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENVIRONMENT)

# Create Pinecone index; 1536 dimensions matches text-embedding-ada-002.
table_name = YOUR_TABLE_NAME
dimension = 1536
metric = "cosine"
pod_type = "p1"
if table_name not in pinecone.list_indexes():
    pinecone.create_index(table_name, dimension=dimension, metric=metric, pod_type=pod_type)

# Connect to the index
index = pinecone.Index(table_name)

# Task list (FIFO queue of {"task_id", "task_name"} dicts)
task_list = deque([])


def add_task(task: Dict):
    """Append a task dict to the global task queue."""
    task_list.append(task)


def get_ada_embedding(text):
    """Return the ada-002 embedding of `text` (newlines collapsed to spaces)."""
    text = text.replace("\n", " ")
    return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]


def task_creation_agent(objective: str, result: Dict, task_description: str, task_list: List[str]):
    """Create new task names from the last result; returns [{"task_name": ...}]."""
    prompt = f"You are an task creation AI that uses the result of an execution agent to create new tasks with the following objective: {objective}, The last completed task has the result: {result}. This result was based on this task description: {task_description}. These are incomplete tasks: {', '.join(task_list)}. Based on the result, create new tasks to be completed by the AI system that do not overlap with incomplete tasks. Return the tasks as an array."
    response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, temperature=0.5, max_tokens=100, top_p=1, frequency_penalty=0, presence_penalty=0)
    new_tasks = response.choices[0].text.strip().split('\n')
    return [{"task_name": task_name} for task_name in new_tasks]


def prioritization_agent(this_task_id: int):
    """Rebuild the global task_list, reprioritized by the model.

    Lines that do not parse as "<id>. <name>" are silently dropped.
    """
    global task_list
    task_names = [t["task_name"] for t in task_list]
    next_task_id = int(this_task_id)+1
    prompt = f"""You are an task prioritization AI tasked with cleaning the formatting of and reprioritizing the following tasks: {task_names}. Consider the ultimate objective of your team:{OBJECTIVE}. Do not remove any tasks. Return the result as a numbered list, like:
    #. First task
    #. Second task
    Start the task list with number {next_task_id}."""
    response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, temperature=0.5, max_tokens=1000, top_p=1, frequency_penalty=0, presence_penalty=0)
    new_tasks = response.choices[0].text.strip().split('\n')
    task_list = deque()
    for task_string in new_tasks:
        task_parts = task_string.strip().split(".", 1)
        if len(task_parts) == 2:
            task_id = task_parts[0].strip()
            task_name = task_parts[1].strip()
            task_list.append({"task_id": task_id, "task_name": task_name})


def execution_agent(objective: str, task: str) -> str:
    """Complete `task` with the model, primed with Pinecone-retrieved context."""
    # context = context_agent(index="quickstart", query="my_search_query", n=5)
    context = context_agent(index=YOUR_TABLE_NAME, query=objective, n=5)
    # print("\n*******RELEVANT CONTEXT******\n")
    # print(context)
    response = openai.Completion.create(
        engine="text-davinci-003",
        prompt=f"You are an AI who performs one task based on the following objective: {objective}. Your task: {task}\nResponse:",
        temperature=0.7,
        max_tokens=2000,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return response.choices[0].text.strip()


def context_agent(query: str, index: str, n: int):
    """Return the task names of the n stored results most similar to `query`."""
    query_embedding = get_ada_embedding(query)
    index = pinecone.Index(index_name=index)
    results = index.query(query_embedding, top_k=n,
                          include_metadata=True)
    # print("***** RESULTS *****")
    # print(results)
    sorted_results = sorted(results.matches, key=lambda x: x.score, reverse=True)
    return [(str(item.metadata['task'])) for item in sorted_results]


# Add the first task
first_task = {
    "task_id": 1,
    "task_name": YOUR_FIRST_TASK
}

add_task(first_task)
# Main loop
task_id_counter = 1
while True:
    if task_list:
        # Print the task list
        print("\033[95m\033[1m"+"\n*****TASK LIST*****\n"+"\033[0m\033[0m")
        for t in task_list:
            print(str(t['task_id'])+": "+t['task_name'])

        # Step 1: Pull the first task
        task = task_list.popleft()
        print("\033[92m\033[1m"+"\n*****NEXT TASK*****\n"+"\033[0m\033[0m")
        print(str(task['task_id'])+": "+task['task_name'])

        # Send to execution function to complete the task based on the context
        result = execution_agent(OBJECTIVE, task["task_name"])
        this_task_id = int(task["task_id"])
        print("\033[93m\033[1m"+"\n*****TASK RESULT*****\n"+"\033[0m\033[0m")
        print(result)

        # Step 2: Enrich result and store in Pinecone
        enriched_result = {'data': result}  # This is where you should enrich the result if needed
        result_id = f"result_{task['task_id']}"
        vector = enriched_result['data']  # extract the actual result from the dictionary
        index.upsert([(result_id, get_ada_embedding(vector), {"task": task['task_name'], "result": result})])

        # Step 3: Create new tasks and reprioritize task list
        new_tasks = task_creation_agent(OBJECTIVE, enriched_result, task["task_name"], [t["task_name"] for t in task_list])

        for new_task in new_tasks:
            task_id_counter += 1
            new_task.update({"task_id": task_id_counter})
            add_task(new_task)
        prioritization_agent(this_task_id)

    # Sleep at loop level so an empty task list does not busy-spin the CPU
    # (indentation is ambiguous in the source diff; this matches the
    # upstream script's intent — confirm against the original repo).
    time.sleep(1)  # Sleep before checking the task list again
babyagi/classic/babyfoxagi/README.md ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ### Author's Note
2
+
3
+ BabyFoxAGI is the 5th mod of BabyAGI. The earlier 4 were [BabyBeeAGI](https://twitter.com/yoheinakajima/status/1652732735344246784?lang=en), [BabyCatAGI](https://twitter.com/yoheinakajima/status/1657448504112091136), [BabyDeerAGI](https://twitter.com/yoheinakajima/status/1666313838868992001), and [BabyElfAGI](https://twitter.com/yoheinakajima/status/1678443482866933760). Following the evolution will be the easiest way to understand BabyFoxAGI. Please check out [the tweet thread introducing BabyFoxAGI](https://twitter.com/yoheinakajima/status/1697539193768116449) for a quick overview.
4
+
5
+ ### New Features in BabyFoxAGI
6
+
7
+ In BabyFoxAGI, the two newest features are:
8
+
9
+ 1. **Self-Improvement (Also known as [FOXY Method](https://twitter.com/yoheinakajima/status/1685894298536148992))**: This helps it improve its task list building.
10
+ 2. **[BabyAGI Experimental UI](https://twitter.com/yoheinakajima/status/1693153307454546331)**: In this feature, the chat is separated from task/output.
11
+
12
+ Notable in the chat is the ability to either run one skill quickly or generate a task list and chain skills, which is where you see BabyAGI (moved to babyagi.py) come in. main.py is now the back-end to the Python Flask-based chat app (public/templates folder).
13
+
14
+ ### Known Issues and Limitations
15
+
16
+ I had issues with parallel tasks within BabyAGI, so removed that for now. I'm also not streaming the task list or in-between work from these task list runs to the UI. For now, you'll have to monitor that in the console. And in general, lots more room for improvement... but wanted to get this out there :)
17
+
18
+ ## Getting Started
19
+
20
+ These instructions will guide you through the process of setting up Classic BabyFoxAGI on your local machine.
21
+
22
+ ### Prerequisites
23
+
24
+ Make sure you have the following software installed:
25
+
26
+ - Git ([Download & Install Git](https://git-scm.com/downloads))
27
+ - Python 3.x ([Download & Install Python](https://www.python.org/downloads/))
28
+ - Pip (usually installed with Python)
29
+
30
+ ### Clone the Repository
31
+
32
+ To clone this specific folder (`classic/babyfoxagi`) from the GitHub repository, open a terminal and run the following commands:
33
+
34
+ ```bash
35
+ # Navigate to the directory where you want to clone the project
36
+ cd your/desired/directory
37
+
38
+ # Clone the entire repository
39
+ git clone https://github.com/yoheinakajima/babyagi.git
40
+
41
+ # Move into the cloned repository
42
+ cd babyagi
43
+
44
+ # Navigate to the 'classic/babyfoxagi' folder
45
+ cd classic/babyfoxagi
46
+ ```
47
+ ### Install Dependencies
48
+ Since there's no requirements.txt, you'll need to install the required packages manually:
49
+ ```bash
50
+ # Install OpenAI package
51
+ pip install openai
52
+
53
+ # Install Flask
54
+ pip install Flask
55
+ ```
56
+ Note: If you are using a Python environment manager like conda, make sure to activate your environment before running the pip commands.
57
+ ### Configuration
58
+ Create a .env file in the classic/babyfoxagi directory to store your API keys.
59
+ ```bash
60
+ # Create a .env file
61
+ touch .env
62
+
63
+ # Open the .env file with a text editor and add your API keys
64
+ echo "OPENAI_API_KEY=your_openai_api_key_here" >> .env
65
+ # Add other API keys as needed for other tools (e.g., Airtable)
66
+ echo "SERPAPI_API_KEY=your_serpapi_api_key_here" >> .env
67
+ echo "AIRTABLE_API_KEY=your_airtable_api_key_here" >> .env
68
+ ```
69
+ For other tools like airtable_search, you may also need to specify additional configurations like BASE, TABLE, and COLUMN in the airtable_search.py file.
70
+ ### Running the Project
71
+ After cloning the repository, installing the dependencies, and setting up the .env file, just run:
72
+ ```bash
73
+ python main.py
74
+ ```
75
+
76
+ # BabyFoxAGI - Overview
77
+
78
+ BabyFoxAGI is an experimental chat-based UI that can use a variety of skills to accomplish tasks, displayed in a separate panel from the Chat UI, allowing for parallel execution of tasks. Tasks can be accomplished quickly using one skill, or by generating a tasklist and chaining multiple tasks/skills together.
79
+
80
+ ## Skills
81
+
82
+ Skills that are included include text_completion, web_search, drawing (uses html canvas), documentation_search, code_reader, skill_saver, airtable_search, and call_babyagi. Please read through each skill to understand them better.
83
+
84
+ ## Components
85
+
86
+ The project consists mainly of two Python scripts (`main.py` and `babyagi.py`) and a client-side JavaScript file (`Chat.js`), along with an HTML layout (`index.html`).
87
+
88
+ ### main.py
89
+
90
+ #### Role
91
+ Acts as the entry point for the Flask web application and handles routes, API calls, and ongoing tasks.
92
+
93
+ #### Key Features
94
+ - Flask routes for handling HTTP requests.
95
+ - Integration with OpenAI's API for text summarization and skill execution.
96
+ - Management of ongoing tasks and their statuses.
97
+
98
+ ### Chat.js
99
+
100
+ #### Role
101
+ Handles the client-side interaction within the web interface, including capturing user input and displaying messages and task statuses.
102
+
103
+ #### Key Features
104
+ - Dynamic chat interface for user interaction.
105
+ - HTTP requests to the Flask backend for task initiation and status checks.
106
+ - Presentation layer for task status and results.
107
+
108
+ ### index.html
109
+
110
+ #### Role
111
+ Provides the layout for the web interface, including a chat box for user interaction and an objectives box for task display.
112
+
113
+ #### Key Features
114
+ - HTML layout that accommodates the chat box and objectives box side-by-side.
115
+
116
+ ### babyagi.py
117
+
118
+ #### Role
119
+ Acts as the central orchestrator for task execution, coordinating with various skills to accomplish a predefined objective.
120
+
121
+ #### Key Features
122
+ - Task and skill registries to manage the execution.
123
+ - Main execution loop that iteratively performs tasks based on dependencies and objectives.
124
+ - Optional feature to reflect on task outputs and potentially generate new tasks.
125
+
126
+ ## Flow of Execution
127
+
128
+ 1. The user interacts with the chat interface, sending commands or inquiries.
129
+ 2. `main.py` receives these requests and uses OpenAI's API to determine the next steps, which could include executing a skill or creating a task list.
130
+ 3. If tasks are to be executed, `main.py` delegates to `babyagi.py`.
131
+ 4. `babyagi.py` uses its main execution loop to perform tasks in the required sequence, based on dependencies and the main objective.
132
+ 5. The output or status of each task is sent back to the client-side via Flask routes, and displayed using `Chat.js`.
133
+
134
+ ## Notes
135
+
136
+ - The system leverages `.env` for API key management.
137
+ - `.ndjson` files are used for persistent storage of chat and task statuses.
138
+ - There is an optional `REFLECTION` feature in `babyagi.py` that allows the system to reflect on task outputs and potentially generate new tasks.
139
+
140
+ This overview provides a comprehensive look into the functionalities and execution flow of the project, offering both high-level insights and low-level details.
babyagi/classic/babyfoxagi/babyagi.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ import time
4
+ from datetime import datetime
5
+ from skills.skill_registry import SkillRegistry
6
+ from tasks.task_registry import TaskRegistry
7
+ from ongoing_tasks import ongoing_tasks
8
+
9
+ load_dotenv() # Load environment variables from .env file
10
+
11
+ api_keys = {
12
+ 'openai': os.getenv('OPENAI_API_KEY'),
13
+ 'serpapi': os.getenv('SERPAPI_API_KEY')
14
+ #'airtable': os.getenv('AIRTABLE_API_KEY')
15
+ }
16
+
17
+ OBJECTIVE = "Research Yohei Nakajima and write a poem about him."
18
+ LOAD_SKILLS = ['web_search', 'text_completion', 'code_reader','google_jobs_api_search','image_generation','startup_analysis','play_music','game_generation']
19
+ #add web_search and documentation_search after you add SERPAPI_API_KEY in your secrets. airtable_search once you've added your AIRTABLE_API_KEY, and add base/table/column data to airtable_search.py, etc...
20
+ REFLECTION = False #Experimental reflection step between each task run (when running tasklist)
21
+
22
def run_single_task(task_id, task, skill_registry, task_outputs, OBJECTIVE, task_registry):
    """Execute a single task, record its result, and optionally reflect on it.

    Args:
        task_id: Identifier of the task to run.
        task: Task dict (optional 'description' and 'skill' keys are read).
        skill_registry: Registry handed through to task execution.
        task_outputs: Shared per-task output map; mutated in place.
        OBJECTIVE: Overall objective string for the run.
        task_registry: Registry that executes tasks and tracks their status.
    """
    task_output = task_registry.execute_task(task_id, task, skill_registry, task_outputs, OBJECTIVE)

    # Record the result in the shared output map (mutated in place).
    task_outputs[task_id]["output"] = task_output
    task_outputs[task_id]["completed"] = True
    task_outputs[task_id]["description"] = task.get('description', 'No description available')
    task_outputs[task_id]["skill"] = task.get('skill', 'No skill information available')

    if task_output:
        task_registry.update_tasks({"id": task_id, "status": "completed", "result": task_output})

        completed_task = task_registry.get_task(task_id)
        print(f"Task #{task_id}: {completed_task.get('task')} [COMPLETED][{completed_task.get('skill')}]")

        if REFLECTION:
            # BUG FIX: skill_descriptions was referenced here but is only
            # defined inside run_main_loop, so enabling REFLECTION raised a
            # NameError. Rebuild it from the module-level skill registry.
            skill_descriptions = ",".join(
                f"[{skill.name}: {skill.description}]"
                for skill in global_skill_registry.skills.values())
            new_tasks, insert_after_ids, tasks_to_update = task_registry.reflect_on_output(task_output, skill_descriptions)
            for new_task, after_id in zip(new_tasks, insert_after_ids):
                task_registry.add_task(new_task, after_id)

            # reflect_on_output may hand back a single dict; normalize to a list.
            if isinstance(tasks_to_update, dict) and tasks_to_update:
                tasks_to_update = [tasks_to_update]

            for task_to_update in tasks_to_update:
                task_registry.update_tasks(task_to_update)
47
+
48
+
49
+
50
+
51
def run_main_loop(OBJECTIVE, LOAD_SKILLS, api_keys, REFLECTION=False):
    """Main execution loop: build a task list for OBJECTIVE and run it to completion.

    Creates a TaskRegistry tasklist from the objective and the globally loaded
    skills, executes ready tasks (those whose dependencies are completed) until
    every task has an output, runs final reflection, and writes a session
    summary file. Returns the output of the last task, or an error string.

    NOTE(review): the LOAD_SKILLS and REFLECTION parameters are not used in
    this body — the module-level global_skill_registry and REFLECTION are used
    instead; confirm whether the parameters should be wired through.
    """
    try:
        # Describe every loaded skill for the task-creation prompt.
        skill_descriptions = ",".join(f"[{skill.name}: {skill.description}]" for skill in global_skill_registry.skills.values())
        task_registry = TaskRegistry()
        task_registry.create_tasklist(OBJECTIVE, skill_descriptions)

        skill_names = [skill.name for skill in global_skill_registry.skills.values()]
        session_summary = f"OBJECTIVE:{OBJECTIVE}.#SKILLS:{','.join(skill_names)}.#"

        # Per-task completion/output tracking, keyed by task id.
        task_outputs = {task["id"]: {"completed": False, "output": None} for task in task_registry.get_tasks()}

        task_output = None  # Initialize task_output to None

        # Loop until every known task has been marked completed.
        while not all(task["completed"] for task in task_outputs.values()):
            tasks = task_registry.get_tasks()
            task_registry.print_tasklist(tasks)

            # Register any tasks added since the last iteration (e.g. by reflection).
            for task in tasks:
                if task["id"] not in task_outputs:
                    task_outputs[task["id"]] = {"completed": False, "output": None}

            # A task is ready when all of its dependencies are completed and it
            # has not been completed itself.
            ready_tasks = [(task["id"], task) for task in tasks if all((dep in task_outputs and task_outputs[dep]["completed"]) for dep in task.get('dependent_task_ids', [])) and not task_outputs[task["id"]]["completed"]]

            for task_id, task in ready_tasks:
                run_single_task(task_id, task, global_skill_registry, task_outputs, OBJECTIVE, task_registry)

            time.sleep(0.1)

        # Assuming the last task in tasks has the latest output. Adjust if your use case is different.
        last_task_id = tasks[-1]["id"] if tasks else None
        task_output = task_outputs[last_task_id]["output"] if last_task_id else None

        # Post-run reflection on the whole session (tasks and skills).
        task_registry.reflect_on_final(OBJECTIVE, task_registry.get_tasks(), task_output, skill_descriptions)
        global_skill_registry.reflect_skills(OBJECTIVE, task_registry.get_tasks(), task_output, skill_descriptions)

        # Persist a timestamped session summary; assumes an 'output/' dir exists.
        with open(f'output/output_{datetime.now().strftime("%d_%m_%Y_%H_%M_%S")}.txt', 'w') as file:
            file.write(session_summary)
        print("...file saved.")
        print("END")

        return task_output  # Return the last task output

    except Exception as e:
        # NOTE(review): failures are reported as a string return value rather
        # than raised — callers (execute_task_list) store it as the output.
        return f"An error occurred: {e}"
96
+
97
+
98
+
99
+ # Removed repeated logic for initiating skill registry
100
+ global_skill_registry = SkillRegistry(api_keys=api_keys, main_loop_function=run_main_loop, skill_names=LOAD_SKILLS)
101
+
102
+
103
def execute_skill(skill_name, objective, task_id):
    """Execute a single skill and record the outcome in ongoing_tasks.

    Runs on a background thread started by main.py; results and errors are
    communicated through the shared ongoing_tasks dict, not the return value.
    """
    skill = global_skill_registry.get_skill(skill_name)
    if skill is None:
        # BUG FIX: previously a missing skill left the task in 'ongoing'
        # status forever, so the UI's status polling never terminated.
        # Record a terminal error state before returning.
        ongoing_tasks[task_id].update({
            "status": "error",
            "error": f"Skill '{skill_name}' not found"
        })
        return "Skill not found :("
    try:
        result = skill.execute(objective, "", objective)
        ongoing_tasks[task_id].update({"status": "completed", "output": result})
    except Exception as e:
        # Surface the failure through the task record; the thread's exception
        # would otherwise be lost.
        ongoing_tasks[task_id].update({"status": "error", "error": str(e)})
    return task_id
114
+
115
def execute_task_list(objective, api_keys, task_id):
    """Execute a list of tasks for *objective* via run_main_loop.

    Runs on a background thread; progress is reported through the shared
    ongoing_tasks dict keyed by task_id.
    """
    try:
        # NOTE(review): this TaskRegistry is a fresh instance, separate from
        # the one run_main_loop creates internally, so get_tasks() below
        # likely returns an empty list — confirm intended.
        task_registry = TaskRegistry()
        # NOTE(review): get_skills() (a SkillRegistry) is passed where
        # run_main_loop declares LOAD_SKILLS; that parameter is unused in
        # run_main_loop's body, so this is harmless but misleading.
        result = run_main_loop(objective, get_skills(), api_keys)
        ongoing_tasks[task_id].update({"status": "completed", "output": result})
        return task_registry.get_tasks(), task_id
    except Exception as e:
        # Record the failure so status polling reaches a terminal state.
        ongoing_tasks[task_id].update({"status": "error", "error": str(e)})
        print(f"Error in execute_task_list: {e}")
        return task_id
126
+
127
+
128
+
129
def get_skills():
    """Accessor for the shared module-level skill registry."""
    global global_skill_registry
    print("Returning GLOBAL SKILL REGISTRY")
    return global_skill_registry
135
+
136
+ # Removed repeated logic for initiating skill registry
137
+ global_skill_registry = SkillRegistry(api_keys=api_keys, main_loop_function=run_main_loop, skill_names=LOAD_SKILLS)
138
+
139
+ if __name__ == "__main__":
140
+ run_main_loop(OBJECTIVE, LOAD_SKILLS, api_keys, REFLECTION)
babyagi/classic/babyfoxagi/forever_cache.ndjson ADDED
@@ -0,0 +1 @@
 
 
1
+ {"role": "assistant", "content": "Hey I'm BabyAGI! How can I help you today?"}
babyagi/classic/babyfoxagi/main.py ADDED
@@ -0,0 +1,309 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, render_template, request, jsonify, send_from_directory
2
+ import openai
3
+ import os
4
+ import json
5
+ import threading
6
+ from babyagi import get_skills, execute_skill, execute_task_list, api_keys, LOAD_SKILLS
7
+ from ongoing_tasks import ongoing_tasks
8
+
9
+ app = Flask(__name__, static_folder='public/static')
10
+ openai.api_key = os.getenv('OPENAI_API_KEY')
11
+
12
+
13
@app.route('/')
def hello_world():
    """Serve the chat UI page (templates/index.html)."""

    return render_template('index.html')
17
+
18
+
19
+ FOREVER_CACHE_FILE = "forever_cache.ndjson"
20
+ OVERALL_SUMMARY_FILE = "overall_summary.ndjson"
21
+
22
+
23
@app.route('/get-all-messages', methods=["GET"])
def get_all_messages():
    """Return the full chat history as a JSON array.

    Reads one JSON object per line (NDJSON) from the forever-cache file.
    On any failure, responds 500 with the error message.
    """
    try:
        messages = []
        # CONSISTENCY FIX: use the FOREVER_CACHE_FILE constant defined above
        # instead of re-hardcoding the "forever_cache.ndjson" path here.
        with open(FOREVER_CACHE_FILE, "r") as file:
            for line in file:
                messages.append(json.loads(line))

        return jsonify(messages)

    except Exception as e:
        return jsonify({"error": str(e)}), 500
35
+
36
+
37
def get_latest_summary():
    """Return the most recent overall-summary entry, or '' when none exists."""
    with open(OVERALL_SUMMARY_FILE, 'r') as summary_file:
        entries = summary_file.readlines()
    if not entries:
        return ""
    # Each line is a JSON object; the newest summary is on the last line.
    return json.loads(entries[-1])["summary"]
43
+
44
+
45
def summarize_text(text):
    """Summarize *text* (a flattened conversation) via OpenAI chat completion.

    Uses gpt-3.5-turbo-16k with a fixed system prompt capping the summary at
    roughly 500 tokens. Returns the stripped summary string.
    """
    system_message = (
        "Your task is to generate a concise summary for the provided conversation, this will be fed to a separate AI call as context to generate responses for future user queries."
        " The conversation contains various messages that have been exchanged between participants."
        " Please ensure your summary captures the main points and context of the conversation without being too verbose."
        " The summary should be limited to a maximum of 500 tokens."
        " Here's the conversation you need to summarize:")

    completion = openai.ChatCompletion.create(model="gpt-3.5-turbo-16k",
                                              messages=[{
                                                  "role": "system",
                                                  "content": system_message
                                              }, {
                                                  "role": "user",
                                                  "content": text
                                              }])

    # Extracting the content from the assistant's message
    return completion.choices[0]['message']['content'].strip()
64
+
65
+
66
def combine_summaries(overall, latest):
    """Merge the running overall summary with the latest batch summary.

    Asks the model to fold *latest* into *overall*, keeping the combined
    summary under roughly 500 tokens. Returns the stripped merged string.
    """
    system_message = (
        "Your task is to generate a concise summary for the provided conversation, this will be fed to a separate AI call as context to generate responses for future user queries."
        "You will do this by combining two given summaries into one cohesive summary."
        " Make sure to retain the key points from both summaries and create a concise, unified summary."
        " The combined summary should not exceed 500 tokens."
        " Here are the summaries you need to combine:")

    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=[{
            "role": "system",
            "content": system_message
        }, {
            "role": "user",
            "content": f"Overall summary: {overall}\nLatest summary: {latest}"
        }])

    # Extracting the content from the assistant's message
    return completion.choices[0]['message']['content'].strip()
88
+
89
+
90
+
91
def openai_function_call(user_message):
    """Route a user chat message through OpenAI function-calling.

    Appends the message to the forever-cache, kicks off a background summary
    update, then asks the model to pick a response path ('ChatCompletion',
    'Skill', or 'TaskList'). Skill/TaskList paths are launched on background
    threads and tracked in ongoing_tasks. Returns the model's structured
    response dict with a generated 'taskId' added.
    """
    global ongoing_tasks
    global global_skill_registry
    # NOTE(review): this rebinds global_skill_registry to LOAD_SKILLS (a list
    # of skill names), shadowing the SkillRegistry object of the same name in
    # babyagi.py. Here it is only interpolated into prompt text below —
    # confirm this is intended.
    global_skill_registry = LOAD_SKILLS
    print("Returning GLOBAL SKILL REGISTRY")
    print(global_skill_registry)

    # Append the new user message to the forever_cache file
    user_entry = {"role": "user", "content": user_message}
    append_to_ndjson(FOREVER_CACHE_FILE, user_entry)

    # Retrieve the last 20 stored messages
    with open(FOREVER_CACHE_FILE, "r") as file:
        lines = file.readlines()

    last_20_messages = [json.loads(line) for line in lines][-20:]

    # Always update the summary in a separate thread
    threading.Thread(target=update_summary, args=(last_20_messages, )).start()

    overall_summary = get_latest_summary()
    print("LOAD_SKILLS")
    print(global_skill_registry)
    system_message = (
        f"You are a fun happy and quirky AI chat assistant that uses Gen Z language and lots of emojis named BabyAGI with capabilities beyond chat. For every user message, you quickly analyze whether this is a request that you can simply respond via ChatCompletion, whether you need to use one of the skills provided, or whether you should create a task list and chain multiple skills together. You will always provide a message_to_user. If path is Skill or TaskList, always generate an objective. If path is Skill, ALWAYS include skill_used from one of the available skills. ###Here are your available skills: {global_skill_registry}.###For context, here is the overall summary of the chat: {overall_summary}."
    )

    # Force a structured decision via a mandatory function call.
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k",
        messages=[
            {"role": "system", "content": system_message},
            *last_20_messages,
            {"role": "user", "content": user_message},
        ],
        functions=[{
            "name": "determine_response_type",
            "description":
            "Determine whether to respond via ChatCompletion, use a skill, or create a task list. Always provide a message_to_user.",
            "parameters": {
                "type": "object",
                "properties": {
                    "message_to_user": {
                        "type": "string",
                        "description":
                        "A message for the user, indicating the AI's action or providing the direct chat response. ALWAYS REQUIRED. Do not use line breaks."
                    },
                    "path": {
                        "type": "string",
                        "enum": ["ChatCompletion", "Skill", "TaskList"],  # Restrict the values to these three options
                        "description":
                        "The type of response – either 'ChatCompletion', 'Skill', or 'TaskList'"
                    },
                    "skill_used": {
                        "type": "string",
                        "description":
                        f"If path is 'Skill', indicates which skill to use. If path is 'Skill', ALWAYS use skill_used. Must be one of these: {global_skill_registry}"
                    },
                    "objective": {
                        "type": "string",
                        "description":
                        "If path is 'Skill' or 'TaskList', describes the main task or objective. Always include if path is 'Skill' or 'TaskList'."
                    }
                },
                "required": ["path", "message_to_user", "objective", "skill_used"]
            }
        }],
        function_call={"name": "determine_response_type"})

    # Extract AI's structured response from function call
    response_data = completion.choices[0]['message']['function_call'][
        'arguments']
    # Arguments may arrive as a JSON string; decode if so.
    if isinstance(response_data, str):
        response_data = json.loads(response_data)
    print("RESPONSE DATA:")
    print(response_data)
    path = response_data.get("path")
    skill_used = response_data.get("skill_used")
    objective = response_data.get("objective")
    task_id = generate_task_id()
    response_data["taskId"] = task_id
    if path == "Skill":
        # Register the task, then execute the single skill on a worker thread.
        ongoing_tasks[task_id] = {
            'status': 'ongoing',
            'description': objective,
            'skill_used': skill_used
        }
        threading.Thread(target=execute_skill, args=(skill_used, objective, task_id)).start()
        update_ongoing_tasks_file()
    elif path == "TaskList":
        # Register the task, then execute the multi-skill task list in the
        # background.
        ongoing_tasks[task_id] = {
            'status': 'ongoing',
            'description': objective,
            'skill_used': 'Multiple'
        }
        threading.Thread(target=execute_task_list, args=(objective, api_keys, task_id)).start()
        update_ongoing_tasks_file()

    return response_data
193
+
194
+
195
def generate_task_id():
    """Derive a new task ID from the current count of tracked tasks."""
    next_id = len(ongoing_tasks) + 1
    return str(next_id)
198
+
199
+
200
def update_summary(messages):
    """Summarize recent messages and fold the result into the overall summary.

    Runs on a background thread; persists the merged summary as a new NDJSON
    line in the overall-summary file.
    """
    # Flatten the message contents into one text blob and summarize it.
    conversation_text = " ".join(msg['content'] for msg in messages)
    recent_summary = summarize_text(conversation_text)

    # Merge with the previously stored summary and persist the result.
    previous_summary = get_latest_summary()
    merged_summary = combine_summaries(previous_summary, recent_summary)
    append_to_ndjson(OVERALL_SUMMARY_FILE, {"summary": merged_summary})
209
+
210
+
211
@app.route('/determine-response', methods=["POST"])
def determine_response():
    """Handle a chat message: route it through the model and persist replies.

    Expects JSON {"user_message": ...}; returns the model's decision
    (message, path, skill, objective, task id) or an error payload.
    """
    try:
        # Ensure that the request contains JSON data
        if not request.is_json:
            return jsonify({"error": "Expected JSON data"}), 400

        user_message = request.json.get("user_message")

        # Check if user_message is provided
        if not user_message:
            return jsonify({"error": "user_message field is required"}), 400

        response_data = openai_function_call(user_message)

        data = {
            "message": response_data['message_to_user'],
            "skill_used": response_data.get('skill_used', None),
            "objective": response_data.get('objective', None),
            "task_list": response_data.get('task_list', []),
            "path": response_data.get('path', []),
            "task_id": response_data.get('taskId')
        }

        # Storing AI's response to NDJSON file.
        # CONSISTENCY FIX: use the FOREVER_CACHE_FILE constant instead of a
        # second hard-coded "forever_cache.ndjson" literal.
        ai_entry = {
            "role": "assistant",
            "content": response_data['message_to_user']
        }
        append_to_ndjson(FOREVER_CACHE_FILE, ai_entry)

        print("END OF DETERMINE-RESPONSE. PRINTING 'DATA'")
        print(data)
        return jsonify(data)

    except Exception as e:
        print(f"Exception occurred: {str(e)}")
        return jsonify({"error": str(e)}), 500
249
+
250
+
251
def append_to_ndjson(filename, data):
    """Append *data* as one JSON line (NDJSON) to *filename*.

    Best-effort: failures are printed rather than raised, since callers
    (chat routes and background threads) treat persistence as non-critical.
    """
    try:
        # BUG FIX: the log line previously printed a literal placeholder
        # instead of the target filename.
        print(f"Appending to {filename} with data: {data}")
        with open(filename, 'a') as file:
            file.write(json.dumps(data) + '\n')
    except Exception as e:
        print(f"Error in append_to_ndjson: {str(e)}")
258
+
259
+
260
+
261
@app.route('/check-task-status/<task_id>', methods=["GET"])
def check_task_status(task_id):
    """Report the status of a tracked task, or 404 when unknown."""
    global ongoing_tasks
    # Persist the in-memory task map before reading it.
    # NOTE(review): writing on a read endpoint is unusual — presumably to
    # keep ongoing_tasks.py current for restarts; confirm intended.
    update_ongoing_tasks_file()
    print("CHECK_TASK_STATUS")
    print(task_id)
    task = ongoing_tasks.get(task_id)

    # First check if task is None
    if not task:
        return jsonify({"error": f"No task with ID {task_id} found."}), 404

    # Now, it's safe to access attributes of task
    print(task.get("status"))
    return jsonify({"status": task.get("status")})
276
+
277
+
278
@app.route('/fetch-task-output/<task_id>', methods=["GET"])
def fetch_task_output(task_id):
    """Return the stored output of a tracked task, or 404 when unknown."""
    print("FETCH_TASK_STATUS")
    print(task_id)
    task = ongoing_tasks.get(task_id)
    if not task:
        return jsonify({"error": f"No task with ID {task_id} found."}), 404
    # 'output' may be None while the background thread is still running.
    return jsonify({"output": task.get("output")})
286
+
287
+
288
def update_ongoing_tasks_file():
    """Persist the in-memory task map by rewriting ongoing_tasks.py.

    NOTE(review): this serializes the dict via its repr into an importable
    Python module (read back by `from ongoing_tasks import ongoing_tasks`).
    It is fragile — any value whose repr is not valid Python breaks the
    import, and concurrent threads may interleave writes; consider JSON.
    """
    with open("ongoing_tasks.py", "w") as file:
        file.write(f"ongoing_tasks = {ongoing_tasks}\n")
291
+
292
@app.route('/get-all-tasks', methods=['GET'])
def get_all_tasks():
    """Return every tracked task (id, status, description, skill, output) as JSON."""
    # Flatten the ongoing_tasks map into a list of per-task records,
    # defaulting any missing fields.
    tasks = [
        {
            'task_id': task_id,
            'status': task_data.get('status', 'unknown'),
            'description': task_data.get('description', 'N/A'),
            'skill_used': task_data.get('skill_used', 'N/A'),
            'output': task_data.get('output', 'N/A')
        }
        for task_id, task_data in ongoing_tasks.items()
    ]
    return jsonify(tasks)
305
+
306
+
307
+
308
+ if __name__ == "__main__":
309
+ app.run(host='0.0.0.0', port=8080)
babyagi/classic/babyfoxagi/ongoing_tasks.py ADDED
@@ -0,0 +1 @@
 
 
1
+ ongoing_tasks = {}
babyagi/classic/babyfoxagi/overall_summary.ndjson ADDED
File without changes
babyagi/classic/babyfoxagi/poetry.lock ADDED
@@ -0,0 +1,886 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [[package]]
2
+ name = "aiohttp"
3
+ version = "3.8.5"
4
+ description = "Async http client/server framework (asyncio)"
5
+ category = "main"
6
+ optional = false
7
+ python-versions = ">=3.6"
8
+
9
+ [package.dependencies]
10
+ aiosignal = ">=1.1.2"
11
+ async-timeout = ">=4.0.0a3,<5.0"
12
+ attrs = ">=17.3.0"
13
+ charset-normalizer = ">=2.0,<4.0"
14
+ frozenlist = ">=1.1.1"
15
+ multidict = ">=4.5,<7.0"
16
+ yarl = ">=1.0,<2.0"
17
+
18
+ [package.extras]
19
+ speedups = ["aiodns", "brotli", "cchardet"]
20
+
21
+ [[package]]
22
+ name = "aiohttp-retry"
23
+ version = "2.8.3"
24
+ description = "Simple retry client for aiohttp"
25
+ category = "main"
26
+ optional = false
27
+ python-versions = ">=3.7"
28
+
29
+ [package.dependencies]
30
+ aiohttp = "*"
31
+
32
+ [[package]]
33
+ name = "aiosignal"
34
+ version = "1.2.0"
35
+ description = "aiosignal: a list of registered asynchronous callbacks"
36
+ category = "main"
37
+ optional = false
38
+ python-versions = ">=3.6"
39
+
40
+ [package.dependencies]
41
+ frozenlist = ">=1.1.0"
42
+
43
+ [[package]]
44
+ name = "async-timeout"
45
+ version = "4.0.2"
46
+ description = "Timeout context manager for asyncio programs"
47
+ category = "main"
48
+ optional = false
49
+ python-versions = ">=3.6"
50
+
51
+ [[package]]
52
+ name = "attrs"
53
+ version = "22.1.0"
54
+ description = "Classes Without Boilerplate"
55
+ category = "main"
56
+ optional = false
57
+ python-versions = ">=3.5"
58
+
59
+ [package.extras]
60
+ dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"]
61
+ docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"]
62
+ tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "zope.interface", "cloudpickle"]
63
+ tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "mypy (>=0.900,!=0.940)", "pytest-mypy-plugins", "cloudpickle"]
64
+
65
+ [[package]]
66
+ name = "beautifulsoup4"
67
+ version = "4.12.2"
68
+ description = "Screen-scraping library"
69
+ category = "main"
70
+ optional = false
71
+ python-versions = ">=3.6.0"
72
+
73
+ [package.dependencies]
74
+ soupsieve = ">1.2"
75
+
76
+ [package.extras]
77
+ html5lib = ["html5lib"]
78
+ lxml = ["lxml"]
79
+
80
+ [[package]]
81
+ name = "bs4"
82
+ version = "0.0.1"
83
+ description = "Dummy package for Beautiful Soup"
84
+ category = "main"
85
+ optional = false
86
+ python-versions = "*"
87
+
88
+ [package.dependencies]
89
+ beautifulsoup4 = "*"
90
+
91
+ [[package]]
92
+ name = "cachetools"
93
+ version = "5.3.1"
94
+ description = "Extensible memoizing collections and decorators"
95
+ category = "main"
96
+ optional = false
97
+ python-versions = ">=3.7"
98
+
99
+ [[package]]
100
+ name = "certifi"
101
+ version = "2022.9.24"
102
+ description = "Python package for providing Mozilla's CA Bundle."
103
+ category = "main"
104
+ optional = false
105
+ python-versions = ">=3.6"
106
+
107
+ [[package]]
108
+ name = "charset-normalizer"
109
+ version = "2.1.1"
110
+ description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
111
+ category = "main"
112
+ optional = false
113
+ python-versions = ">=3.6.0"
114
+
115
+ [package.extras]
116
+ unicode_backport = ["unicodedata2"]
117
+
118
+ [[package]]
119
+ name = "click"
120
+ version = "8.1.3"
121
+ description = "Composable command line interface toolkit"
122
+ category = "main"
123
+ optional = false
124
+ python-versions = ">=3.7"
125
+
126
+ [package.dependencies]
127
+ colorama = {version = "*", markers = "platform_system == \"Windows\""}
128
+
129
+ [[package]]
130
+ name = "colorama"
131
+ version = "0.4.5"
132
+ description = "Cross-platform colored terminal text."
133
+ category = "main"
134
+ optional = false
135
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
136
+
137
+ [[package]]
138
+ name = "datadispatch"
139
+ version = "1.0.0"
140
+ description = "Like functools.singledispatch but for values"
141
+ category = "main"
142
+ optional = false
143
+ python-versions = "*"
144
+
145
+ [[package]]
146
+ name = "debugpy"
147
+ version = "1.6.3"
148
+ description = "An implementation of the Debug Adapter Protocol for Python"
149
+ category = "dev"
150
+ optional = false
151
+ python-versions = ">=3.7"
152
+
153
+ [[package]]
154
+ name = "flask"
155
+ version = "2.2.2"
156
+ description = "A simple framework for building complex web applications."
157
+ category = "main"
158
+ optional = false
159
+ python-versions = ">=3.7"
160
+
161
+ [package.dependencies]
162
+ click = ">=8.0"
163
+ importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""}
164
+ itsdangerous = ">=2.0"
165
+ Jinja2 = ">=3.0"
166
+ Werkzeug = ">=2.2.2"
167
+
168
+ [package.extras]
169
+ async = ["asgiref (>=3.2)"]
170
+ dotenv = ["python-dotenv"]
171
+
172
+ [[package]]
173
+ name = "frozenlist"
174
+ version = "1.3.1"
175
+ description = "A list-like structure which implements collections.abc.MutableSequence"
176
+ category = "main"
177
+ optional = false
178
+ python-versions = ">=3.7"
179
+
180
+ [[package]]
181
+ name = "google-api-core"
182
+ version = "2.11.1"
183
+ description = "Google API client core library"
184
+ category = "main"
185
+ optional = false
186
+ python-versions = ">=3.7"
187
+
188
+ [package.dependencies]
189
+ google-auth = ">=2.14.1,<3.0.dev0"
190
+ googleapis-common-protos = ">=1.56.2,<2.0.dev0"
191
+ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0"
192
+ requests = ">=2.18.0,<3.0.0.dev0"
193
+
194
+ [package.extras]
195
+ grpc = ["grpcio (>=1.33.2,<2.0dev)", "grpcio-status (>=1.33.2,<2.0.dev0)", "grpcio (>=1.49.1,<2.0dev)", "grpcio-status (>=1.49.1,<2.0.dev0)"]
196
+ grpcgcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
197
+ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"]
198
+
199
+ [[package]]
200
+ name = "google-api-python-client"
201
+ version = "2.97.0"
202
+ description = "Google API Client Library for Python"
203
+ category = "main"
204
+ optional = false
205
+ python-versions = ">=3.7"
206
+
207
+ [package.dependencies]
208
+ google-api-core = ">=1.31.5,<2.0.0 || >2.3.0,<3.0.0.dev0"
209
+ google-auth = ">=1.19.0,<3.0.0.dev0"
210
+ google-auth-httplib2 = ">=0.1.0"
211
+ httplib2 = ">=0.15.0,<1.dev0"
212
+ uritemplate = ">=3.0.1,<5"
213
+
214
+ [[package]]
215
+ name = "google-auth"
216
+ version = "2.22.0"
217
+ description = "Google Authentication Library"
218
+ category = "main"
219
+ optional = false
220
+ python-versions = ">=3.6"
221
+
222
+ [package.dependencies]
223
+ cachetools = ">=2.0.0,<6.0"
224
+ pyasn1-modules = ">=0.2.1"
225
+ rsa = ">=3.1.4,<5"
226
+ six = ">=1.9.0"
227
+ urllib3 = "<2.0"
228
+
229
+ [package.extras]
230
+ aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"]
231
+ enterprise_cert = ["cryptography (==36.0.2)", "pyopenssl (==22.0.0)"]
232
+ pyopenssl = ["pyopenssl (>=20.0.0)", "cryptography (>=38.0.3)"]
233
+ reauth = ["pyu2f (>=0.1.5)"]
234
+ requests = ["requests (>=2.20.0,<3.0.0.dev0)"]
235
+
236
+ [[package]]
237
+ name = "google-auth-httplib2"
238
+ version = "0.1.0"
239
+ description = "Google Authentication Library: httplib2 transport"
240
+ category = "main"
241
+ optional = false
242
+ python-versions = "*"
243
+
244
+ [package.dependencies]
245
+ google-auth = "*"
246
+ httplib2 = ">=0.15.0"
247
+ six = "*"
248
+
249
+ [[package]]
250
+ name = "googleapis-common-protos"
251
+ version = "1.60.0"
252
+ description = "Common protobufs used in Google APIs"
253
+ category = "main"
254
+ optional = false
255
+ python-versions = ">=3.7"
256
+
257
+ [package.dependencies]
258
+ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<5.0.0.dev0"
259
+
260
+ [package.extras]
261
+ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"]
262
+
263
+ [[package]]
264
+ name = "grpcio"
265
+ version = "1.57.0"
266
+ description = "HTTP/2-based RPC framework"
267
+ category = "main"
268
+ optional = false
269
+ python-versions = ">=3.7"
270
+
271
+ [package.extras]
272
+ protobuf = ["grpcio-tools (>=1.57.0)"]
273
+
274
+ [[package]]
275
+ name = "httplib2"
276
+ version = "0.22.0"
277
+ description = "A comprehensive HTTP client library."
278
+ category = "main"
279
+ optional = false
280
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
281
+
282
+ [package.dependencies]
283
+ pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""}
284
+
285
+ [[package]]
286
+ name = "idna"
287
+ version = "3.4"
288
+ description = "Internationalized Domain Names in Applications (IDNA)"
289
+ category = "main"
290
+ optional = false
291
+ python-versions = ">=3.5"
292
+
293
+ [[package]]
294
+ name = "importlib-metadata"
295
+ version = "5.0.0"
296
+ description = "Read metadata from Python packages"
297
+ category = "main"
298
+ optional = false
299
+ python-versions = ">=3.7"
300
+
301
+ [package.dependencies]
302
+ zipp = ">=0.5"
303
+
304
+ [package.extras]
305
+ docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
306
+ perf = ["ipython"]
307
+ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"]
308
+
309
+ [[package]]
310
+ name = "itsdangerous"
311
+ version = "2.1.2"
312
+ description = "Safely pass data to untrusted environments and back."
313
+ category = "main"
314
+ optional = false
315
+ python-versions = ">=3.7"
316
+
317
+ [[package]]
318
+ name = "jedi"
319
+ version = "0.18.1"
320
+ description = "An autocompletion tool for Python that can be used for text editors."
321
+ category = "dev"
322
+ optional = false
323
+ python-versions = ">=3.6"
324
+
325
+ [package.dependencies]
326
+ parso = ">=0.8.0,<0.9.0"
327
+
328
+ [package.extras]
329
+ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
330
+ testing = ["Django (<3.1)", "colorama", "docopt", "pytest (<7.0.0)"]
331
+
332
+ [[package]]
333
+ name = "jinja2"
334
+ version = "3.1.2"
335
+ description = "A very fast and expressive template engine."
336
+ category = "main"
337
+ optional = false
338
+ python-versions = ">=3.7"
339
+
340
+ [package.dependencies]
341
+ MarkupSafe = ">=2.0"
342
+
343
+ [package.extras]
344
+ i18n = ["Babel (>=2.7)"]
345
+
346
+ [[package]]
347
+ name = "markupsafe"
348
+ version = "2.1.1"
349
+ description = "Safely add untrusted strings to HTML/XML markup."
350
+ category = "main"
351
+ optional = false
352
+ python-versions = ">=3.7"
353
+
354
+ [[package]]
355
+ name = "multidict"
356
+ version = "6.0.2"
357
+ description = "multidict implementation"
358
+ category = "main"
359
+ optional = false
360
+ python-versions = ">=3.7"
361
+
362
+ [[package]]
363
+ name = "numpy"
364
+ version = "1.23.4"
365
+ description = "NumPy is the fundamental package for array computing with Python."
366
+ category = "main"
367
+ optional = false
368
+ python-versions = ">=3.8"
369
+
370
+ [[package]]
371
+ name = "openai"
372
+ version = "0.27.8"
373
+ description = "Python client library for the OpenAI API"
374
+ category = "main"
375
+ optional = false
376
+ python-versions = ">=3.7.1"
377
+
378
+ [package.dependencies]
379
+ aiohttp = "*"
380
+ requests = ">=2.20"
381
+ tqdm = "*"
382
+
383
+ [package.extras]
384
+ datalib = ["numpy", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "openpyxl (>=3.0.7)"]
385
+ dev = ["black (>=21.6b0,<22.0.0)", "pytest (>=6.0.0,<7.0.0)", "pytest-asyncio", "pytest-mock"]
386
+ embeddings = ["scikit-learn (>=1.0.2)", "tenacity (>=8.0.1)", "matplotlib", "plotly", "numpy", "scipy", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "openpyxl (>=3.0.7)"]
387
+ wandb = ["wandb", "numpy", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "openpyxl (>=3.0.7)"]
388
+
389
+ [[package]]
390
+ name = "packaging"
391
+ version = "21.3"
392
+ description = "Core utilities for Python packages"
393
+ category = "dev"
394
+ optional = false
395
+ python-versions = ">=3.6"
396
+
397
+ [package.dependencies]
398
+ pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
399
+
400
+ [[package]]
401
+ name = "parso"
402
+ version = "0.8.3"
403
+ description = "A Python Parser"
404
+ category = "dev"
405
+ optional = false
406
+ python-versions = ">=3.6"
407
+
408
+ [package.extras]
409
+ qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
410
+ testing = ["docopt", "pytest (<6.0.0)"]
411
+
412
+ [[package]]
413
+ name = "pluggy"
414
+ version = "1.0.0"
415
+ description = "plugin and hook calling mechanisms for python"
416
+ category = "dev"
417
+ optional = false
418
+ python-versions = ">=3.6"
419
+
420
+ [package.extras]
421
+ testing = ["pytest-benchmark", "pytest"]
422
+ dev = ["tox", "pre-commit"]
423
+
424
+ [[package]]
425
+ name = "protobuf"
426
+ version = "4.24.2"
427
+ description = ""
428
+ category = "main"
429
+ optional = false
430
+ python-versions = ">=3.7"
431
+
432
+ [[package]]
433
+ name = "pyasn1"
434
+ version = "0.5.0"
435
+ description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)"
436
+ category = "main"
437
+ optional = false
438
+ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
439
+
440
+ [[package]]
441
+ name = "pyasn1-modules"
442
+ version = "0.3.0"
443
+ description = "A collection of ASN.1-based protocols modules"
444
+ category = "main"
445
+ optional = false
446
+ python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
447
+
448
+ [package.dependencies]
449
+ pyasn1 = ">=0.4.6,<0.6.0"
450
+
451
+ [[package]]
452
+ name = "pyflakes"
453
+ version = "2.5.0"
454
+ description = "passive checker of Python programs"
455
+ category = "dev"
456
+ optional = false
457
+ python-versions = ">=3.6"
458
+
459
+ [[package]]
460
+ name = "pyjwt"
461
+ version = "2.8.0"
462
+ description = "JSON Web Token implementation in Python"
463
+ category = "main"
464
+ optional = false
465
+ python-versions = ">=3.7"
466
+
467
+ [package.extras]
468
+ crypto = ["cryptography (>=3.4.0)"]
469
+ dev = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface", "cryptography (>=3.4.0)", "pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)", "pre-commit"]
470
+ docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
471
+ tests = ["pytest (>=6.0.0,<7.0.0)", "coverage[toml] (==5.0.4)"]
472
+
473
+ [[package]]
474
+ name = "pyparsing"
475
+ version = "3.0.9"
476
+ description = "pyparsing module - Classes and methods to define and execute parsing grammars"
477
+ category = "main"
478
+ optional = false
479
+ python-versions = ">=3.6.8"
480
+
481
+ [package.extras]
482
+ diagrams = ["railroad-diagrams", "jinja2"]
483
+
484
+ [[package]]
485
+ name = "python-dotenv"
486
+ version = "1.0.0"
487
+ description = "Read key-value pairs from a .env file and set them as environment variables"
488
+ category = "main"
489
+ optional = false
490
+ python-versions = ">=3.8"
491
+
492
+ [package.extras]
493
+ cli = ["click (>=5.0)"]
494
+
495
+ [[package]]
496
+ name = "python-lsp-jsonrpc"
497
+ version = "1.0.0"
498
+ description = "JSON RPC 2.0 server library"
499
+ category = "dev"
500
+ optional = false
501
+ python-versions = "*"
502
+
503
+ [package.dependencies]
504
+ ujson = ">=3.0.0"
505
+
506
+ [package.extras]
507
+ test = ["coverage", "pytest-cov", "pytest", "pyflakes", "pycodestyle", "pylint"]
508
+
509
+ [[package]]
510
+ name = "pytoolconfig"
511
+ version = "1.2.2"
512
+ description = "Python tool configuration"
513
+ category = "dev"
514
+ optional = false
515
+ python-versions = ">=3.7"
516
+
517
+ [package.dependencies]
518
+ packaging = ">=21.3"
519
+ tomli = {version = ">=2.0", markers = "python_version < \"3.11\""}
520
+
521
+ [package.extras]
522
+ validation = ["pydantic (>=1.7.4)"]
523
+ global = ["appdirs (>=1.4.4)"]
524
+ gen_docs = ["pytoolconfig", "sphinx-rtd-theme (>=1.0.0)", "sphinx-autodoc-typehints (>=1.18.1)", "sphinx (>=4.5.0)"]
525
+ doc = ["sphinx (>=4.5.0)", "tabulate (>=0.8.9)"]
526
+
527
+ [[package]]
528
+ name = "pytz"
529
+ version = "2023.3"
530
+ description = "World timezone definitions, modern and historical"
531
+ category = "main"
532
+ optional = false
533
+ python-versions = "*"
534
+
535
+ [[package]]
536
+ name = "replit"
537
+ version = "3.2.4"
538
+ description = "A library for interacting with features of repl.it"
539
+ category = "main"
540
+ optional = false
541
+ python-versions = ">=3.8,<4.0"
542
+
543
+ [package.dependencies]
544
+ aiohttp = ">=3.6.2,<4.0.0"
545
+ Flask = ">=2.0.0,<3.0.0"
546
+ requests = ">=2.25.1,<3.0.0"
547
+ typing_extensions = ">=3.7.4,<4.0.0"
548
+ Werkzeug = ">=2.0.0,<3.0.0"
549
+
550
+ [[package]]
551
+ name = "replit-python-lsp-server"
552
+ version = "1.15.9"
553
+ description = "Python Language Server for the Language Server Protocol"
554
+ category = "dev"
555
+ optional = false
556
+ python-versions = ">=3.7"
557
+
558
+ [package.dependencies]
559
+ jedi = ">=0.17.2,<0.19.0"
560
+ pluggy = ">=1.0.0"
561
+ pyflakes = {version = ">=2.5.0,<2.6.0", optional = true, markers = "extra == \"pyflakes\""}
562
+ python-lsp-jsonrpc = ">=1.0.0"
563
+ rope = {version = ">0.10.5", optional = true, markers = "extra == \"rope\""}
564
+ toml = ">=0.10.2"
565
+ ujson = ">=3.0.0"
566
+ whatthepatch = {version = ">=1.0.2,<2.0.0", optional = true, markers = "extra == \"yapf\""}
567
+ yapf = {version = "*", optional = true, markers = "extra == \"yapf\""}
568
+
569
+ [package.extras]
570
+ all = ["autopep8 (>=1.6.0,<1.7.0)", "flake8 (>=5.0.0,<5.1.0)", "mccabe (>=0.7.0,<0.8.0)", "pycodestyle (>=2.9.0,<2.10.0)", "pydocstyle (>=2.0.0)", "pyflakes (>=2.5.0,<2.6.0)", "pylint (>=2.5.0)", "rope (>=0.10.5)", "yapf", "whatthepatch"]
571
+ autopep8 = ["autopep8 (>=1.6.0,<1.7.0)"]
572
+ flake8 = ["flake8 (>=5.0.0,<5.1.0)"]
573
+ mccabe = ["mccabe (>=0.7.0,<0.8.0)"]
574
+ pycodestyle = ["pycodestyle (>=2.9.0,<2.10.0)"]
575
+ pydocstyle = ["pydocstyle (>=2.0.0)"]
576
+ pyflakes = ["pyflakes (>=2.5.0,<2.6.0)"]
577
+ pylint = ["pylint (>=2.5.0)"]
578
+ rope = ["rope (>0.10.5)"]
579
+ test = ["pylint (>=2.5.0)", "pytest", "pytest-cov", "coverage", "numpy (<1.23)", "pandas", "matplotlib", "pyqt5", "flaky"]
580
+ websockets = ["websockets (>=10.3)"]
581
+ yapf = ["yapf", "whatthepatch (>=1.0.2,<2.0.0)"]
582
+
583
+ [[package]]
584
+ name = "requests"
585
+ version = "2.28.1"
586
+ description = "Python HTTP for Humans."
587
+ category = "main"
588
+ optional = false
589
+ python-versions = ">=3.7, <4"
590
+
591
+ [package.dependencies]
592
+ certifi = ">=2017.4.17"
593
+ charset-normalizer = ">=2,<3"
594
+ idna = ">=2.5,<4"
595
+ urllib3 = ">=1.21.1,<1.27"
596
+
597
+ [package.extras]
598
+ socks = ["PySocks (>=1.5.6,!=1.5.7)"]
599
+ use_chardet_on_py3 = ["chardet (>=3.0.2,<6)"]
600
+
601
+ [[package]]
602
+ name = "rope"
603
+ version = "1.3.0"
604
+ description = "a python refactoring library..."
605
+ category = "dev"
606
+ optional = false
607
+ python-versions = ">=3.7"
608
+
609
+ [package.dependencies]
610
+ pytoolconfig = ">=1.1.2"
611
+
612
+ [package.extras]
613
+ doc = ["sphinx-rtd-theme (>=1.0.0)", "sphinx-autodoc-typehints (>=1.18.1)", "sphinx (>=4.5.0)", "pytoolconfig"]
614
+ dev = ["build (>=0.7.0)", "pytest-timeout (>=2.1.0)", "pytest (>=7.0.1)"]
615
+
616
+ [[package]]
617
+ name = "rsa"
618
+ version = "4.9"
619
+ description = "Pure-Python RSA implementation"
620
+ category = "main"
621
+ optional = false
622
+ python-versions = ">=3.6,<4"
623
+
624
+ [package.dependencies]
625
+ pyasn1 = ">=0.1.3"
626
+
627
+ [[package]]
628
+ name = "six"
629
+ version = "1.16.0"
630
+ description = "Python 2 and 3 compatibility utilities"
631
+ category = "main"
632
+ optional = false
633
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
634
+
635
+ [[package]]
636
+ name = "skills"
637
+ version = "0.3.0"
638
+ description = "Implementation of the TrueSkill, Glicko and Elo Ranking Algorithms"
639
+ category = "main"
640
+ optional = false
641
+ python-versions = "*"
642
+
643
+ [[package]]
644
+ name = "soupsieve"
645
+ version = "2.4.1"
646
+ description = "A modern CSS selector implementation for Beautiful Soup."
647
+ category = "main"
648
+ optional = false
649
+ python-versions = ">=3.7"
650
+
651
+ [[package]]
652
+ name = "tasks"
653
+ version = "2.8.0"
654
+ description = "A simple personal task queue to track todo items"
655
+ category = "main"
656
+ optional = false
657
+ python-versions = "*"
658
+
659
+ [package.dependencies]
660
+ datadispatch = "*"
661
+
662
+ [[package]]
663
+ name = "toml"
664
+ version = "0.10.2"
665
+ description = "Python Library for Tom's Obvious, Minimal Language"
666
+ category = "dev"
667
+ optional = false
668
+ python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
669
+
670
+ [[package]]
671
+ name = "tomli"
672
+ version = "2.0.1"
673
+ description = "A lil' TOML parser"
674
+ category = "dev"
675
+ optional = false
676
+ python-versions = ">=3.7"
677
+
678
+ [[package]]
679
+ name = "tqdm"
680
+ version = "4.66.1"
681
+ description = "Fast, Extensible Progress Meter"
682
+ category = "main"
683
+ optional = false
684
+ python-versions = ">=3.7"
685
+
686
+ [package.dependencies]
687
+ colorama = {version = "*", markers = "platform_system == \"Windows\""}
688
+
689
+ [package.extras]
690
+ dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
691
+ notebook = ["ipywidgets (>=6)"]
692
+ slack = ["slack-sdk"]
693
+ telegram = ["requests"]
694
+
695
+ [[package]]
696
+ name = "twilio"
697
+ version = "8.5.0"
698
+ description = "Twilio API client and TwiML generator"
699
+ category = "main"
700
+ optional = false
701
+ python-versions = ">=3.7.0"
702
+
703
+ [package.dependencies]
704
+ aiohttp = ">=3.8.4"
705
+ aiohttp-retry = ">=2.8.3"
706
+ PyJWT = ">=2.0.0,<3.0.0"
707
+ pytz = "*"
708
+ requests = ">=2.0.0"
709
+
710
+ [[package]]
711
+ name = "typing-extensions"
712
+ version = "3.10.0.2"
713
+ description = "Backported and Experimental Type Hints for Python 3.5+"
714
+ category = "main"
715
+ optional = false
716
+ python-versions = "*"
717
+
718
+ [[package]]
719
+ name = "ujson"
720
+ version = "5.5.0"
721
+ description = "Ultra fast JSON encoder and decoder for Python"
722
+ category = "dev"
723
+ optional = false
724
+ python-versions = ">=3.7"
725
+
726
+ [[package]]
727
+ name = "uritemplate"
728
+ version = "4.1.1"
729
+ description = "Implementation of RFC 6570 URI Templates"
730
+ category = "main"
731
+ optional = false
732
+ python-versions = ">=3.6"
733
+
734
+ [[package]]
735
+ name = "urllib3"
736
+ version = "1.26.12"
737
+ description = "HTTP library with thread-safe connection pooling, file post, and more."
738
+ category = "main"
739
+ optional = false
740
+ python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4"
741
+
742
+ [package.extras]
743
+ brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"]
744
+ secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "urllib3-secure-extra", "ipaddress"]
745
+ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
746
+
747
+ [[package]]
748
+ name = "werkzeug"
749
+ version = "2.2.2"
750
+ description = "The comprehensive WSGI web application library."
751
+ category = "main"
752
+ optional = false
753
+ python-versions = ">=3.7"
754
+
755
+ [package.dependencies]
756
+ MarkupSafe = ">=2.1.1"
757
+
758
+ [package.extras]
759
+ watchdog = ["watchdog"]
760
+
761
+ [[package]]
762
+ name = "whatthepatch"
763
+ version = "1.0.2"
764
+ description = "A patch parsing and application library."
765
+ category = "dev"
766
+ optional = false
767
+ python-versions = "*"
768
+
769
+ [[package]]
770
+ name = "xds-protos"
771
+ version = "0.0.11"
772
+ description = "Generated Python code from envoyproxy/data-plane-api"
773
+ category = "main"
774
+ optional = false
775
+ python-versions = "*"
776
+
777
+ [package.dependencies]
778
+ grpcio = "*"
779
+ protobuf = "*"
780
+
781
+ [[package]]
782
+ name = "yapf"
783
+ version = "0.32.0"
784
+ description = "A formatter for Python code."
785
+ category = "dev"
786
+ optional = false
787
+ python-versions = "*"
788
+
789
+ [[package]]
790
+ name = "yarl"
791
+ version = "1.8.1"
792
+ description = "Yet another URL library"
793
+ category = "main"
794
+ optional = false
795
+ python-versions = ">=3.7"
796
+
797
+ [package.dependencies]
798
+ idna = ">=2.0"
799
+ multidict = ">=4.0"
800
+
801
+ [[package]]
802
+ name = "zipp"
803
+ version = "3.9.0"
804
+ description = "Backport of pathlib-compatible object wrapper for zip files"
805
+ category = "main"
806
+ optional = false
807
+ python-versions = ">=3.7"
808
+
809
+ [package.extras]
810
+ docs = ["sphinx (>=3.5)", "jaraco.packaging (>=9)", "rst.linker (>=1.9)", "furo", "jaraco.tidelift (>=1.4)"]
811
+ testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "flake8 (<5)", "pytest-cov", "pytest-enabler (>=1.3)", "jaraco.itertools", "func-timeout", "jaraco.functools", "more-itertools", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"]
812
+
813
+ [metadata]
814
+ lock-version = "1.1"
815
+ python-versions = ">=3.8.0,<3.9"
816
+ content-hash = "c0a0faf8c1bfce437b0517d539ce578ac47df821f02234e0c3ad58c35c88ea34"
817
+
818
+ [metadata.files]
819
+ aiohttp = []
820
+ aiohttp-retry = []
821
+ aiosignal = []
822
+ async-timeout = []
823
+ attrs = []
824
+ beautifulsoup4 = []
825
+ bs4 = []
826
+ cachetools = []
827
+ certifi = []
828
+ charset-normalizer = []
829
+ click = []
830
+ colorama = []
831
+ datadispatch = []
832
+ debugpy = []
833
+ flask = []
834
+ frozenlist = []
835
+ google-api-core = []
836
+ google-api-python-client = []
837
+ google-auth = []
838
+ google-auth-httplib2 = []
839
+ googleapis-common-protos = []
840
+ grpcio = []
841
+ httplib2 = []
842
+ idna = []
843
+ importlib-metadata = []
844
+ itsdangerous = []
845
+ jedi = []
846
+ jinja2 = []
847
+ markupsafe = []
848
+ multidict = []
849
+ numpy = []
850
+ openai = []
851
+ packaging = []
852
+ parso = []
853
+ pluggy = []
854
+ protobuf = []
855
+ pyasn1 = []
856
+ pyasn1-modules = []
857
+ pyflakes = []
858
+ pyjwt = []
859
+ pyparsing = []
860
+ python-dotenv = []
861
+ python-lsp-jsonrpc = []
862
+ pytoolconfig = []
863
+ pytz = []
864
+ replit = []
865
+ replit-python-lsp-server = []
866
+ requests = []
867
+ rope = []
868
+ rsa = []
869
+ six = []
870
+ skills = []
871
+ soupsieve = []
872
+ tasks = []
873
+ toml = []
874
+ tomli = []
875
+ tqdm = []
876
+ twilio = []
877
+ typing-extensions = []
878
+ ujson = []
879
+ uritemplate = []
880
+ urllib3 = []
881
+ werkzeug = []
882
+ whatthepatch = []
883
+ xds-protos = []
884
+ yapf = []
885
+ yarl = []
886
+ zipp = []
babyagi/classic/babyfoxagi/public/static/chat.js ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Display message function
2
+ function displayMessage(content, role = "user") {
3
+ let messageHTML = createMessageHTML(content, role);
4
+ $("#chat-messages").append(messageHTML);
5
+ if (role === "ongoing") {
6
+ $(".bg-green-100:last").parent().addClass("ai-processing");
7
+ }
8
+ }
9
+
10
+ function createMessageHTML(content, role) {
11
+ if (role === "user") {
12
+ return `<div class="flex justify-end mb-2">
13
+ <div class="bg-blue-200 text-black rounded-lg p-3 max-w-lg lg:max-w-xl text-right relative before:absolute before:inset-r-0 before:w-3 before:h-3 before:bg-blue-500 before:transform before:-rotate-45 before:-translate-y-1/2">
14
+ ${content}
15
+ </div>
16
+ </div>`;
17
+ } else {
18
+ return `<div class="flex justify-start mb-2">
19
+ <div class="bg-green-100 text-black rounded-lg p-3 max-w-lg lg:max-w-xl relative before:absolute before:inset-l-0 before:w-3 before:h-3 before:bg-gray-300 before:transform before:-rotate-45 before:-translate-y-1/2">
20
+ ${content}
21
+ </div>
22
+ </div>`;
23
+ }
24
+ }
25
+
26
+ // Send message function
27
+ function sendMessage() {
28
+ const userMessage = $("#user-input").val().trim();
29
+
30
+ if (!userMessage) {
31
+ alert("Message cannot be empty!");
32
+ return;
33
+ }
34
+
35
+ // Display the user's message
36
+ displayMessage(userMessage, "user");
37
+ scrollToBottom();
38
+ // Display processing message
39
+ displayMessage('...', 'ongoing');
40
+ scrollToBottom();
41
+
42
+ // Clear the input and disable the button
43
+ $("#user-input").val("");
44
+ toggleSendButton();
45
+
46
+ // Send the message to the backend for processing
47
+ $.ajax({
48
+ type: 'POST',
49
+ url: '/determine-response',
50
+ data: JSON.stringify({ user_message: userMessage }),
51
+ contentType: 'application/json',
52
+ dataType: 'json',
53
+ success: handleResponseSuccess,
54
+ error: handleResponseError
55
+ });
56
+ }
57
+
58
+ function handleResponseSuccess(data) {
59
+ $(".ai-processing").remove();
60
+ displayMessage(data.message, "assistant");
61
+ scrollToBottom();
62
+
63
+ if (data.objective) {
64
+ displayTaskWithStatus(`Task: ${data.objective}`, "ongoing", data.skill_used, data.task_id);
65
+ }
66
+
67
+ if (data.path !== "ChatCompletion") {
68
+ checkTaskCompletion(data.task_id);
69
+ } else {
70
+
71
+ }
72
+
73
+ }
74
+
75
+ function handleResponseError(error) {
76
+ $(".ai-processing").remove();
77
+ const errorMessage = error.responseJSON && error.responseJSON.error ? error.responseJSON.error : "Unknown error";
78
+ displayMessage(`Error: ${errorMessage}`, "error");
79
+ scrollToBottom();
80
+ }
81
+
82
+ // Other utility functions
83
+ function toggleSendButton() {
84
+ if ($("#user-input").val().trim()) {
85
+ $("button").prop("disabled", false);
86
+ } else {
87
+ $("button").prop("disabled", true);
88
+ }
89
+ }
90
+
91
+ function scrollToBottom() {
92
+ setTimeout(() => {
93
+ const chatBox = document.getElementById("chat-messages");
94
+ chatBox.scrollTop = chatBox.scrollHeight;
95
+ }, 100); // small delay to ensure content is rendered
96
+ }
97
+
98
+
99
+
100
+ function checkTaskCompletion(taskId) {
101
+ $.ajax({
102
+ type: 'GET',
103
+ url: `/check-task-status/${taskId}`,
104
+ dataType: 'json',
105
+ success(data) {
106
+ if (data.status === "completed") { updateTaskStatus(taskId, "completed");
107
+ fetchTaskOutput(taskId);
108
+ displayMessage("Hey, I just finished a task!", "assistant");
109
+ } else {
110
+ fetchTaskOutput(taskId);
111
+ setTimeout(() => {
112
+ checkTaskCompletion(taskId);
113
+ }, 5000); // Check every 5 seconds
114
+ }
115
+ },
116
+ error(error) {
117
+ console.error(`Error checking task status for ${taskId}:`, error);
118
+ }
119
+ });
120
+ }
121
+
122
+ function fetchTaskOutput(taskId) {
123
+ $.ajax({
124
+ type: 'GET',
125
+ url: `/fetch-task-output/${taskId}`,
126
+ dataType: 'json',
127
+ success(data) {
128
+ if (data.output) {
129
+ const $taskItem = $(`.task-item[data-task-id="${taskId}"]`); // Find the task item with the given task ID
130
+ console.log('taskItem:'+$taskItem)
131
+ const $outputContainer = $taskItem.find('.task-output');
132
+ console.log('outputContainer:'+$outputContainer)
133
+ console.log('data.output:'+data.output)
134
+
135
+ // Update the task's output content
136
+ $outputContainer.html(`<p>${data.output}</p>`);
137
+ }
138
+ },
139
+ error(error) {
140
+ console.error(`Error fetching task output for ${taskId}:`, error);
141
+ }
142
+ });
143
+ }
144
+
145
+
146
+
147
+
148
+ $(document).ready(function() {
149
+ toggleSendButton();
150
+ loadPreviousMessages();
151
+ loadAllTasks();
152
+ $("#send-btn").on('click', function() {
153
+ sendMessage();
154
+ });
155
+ $("#user-input").on('keyup', toggleSendButton);
156
+ $("#user-input").on('keydown', function(e) {
157
+ if (e.key === 'Enter' && !e.shiftKey && !e.ctrlKey) {
158
+ e.preventDefault();
159
+ sendMessage();
160
+ }
161
+ });
162
+ $("#task-search").on("keyup", function() {
163
+ let value = $(this).val().toLowerCase();
164
+ $(".task-item").filter(function() {
165
+ $(this).toggle($(this).find(".task-title").text().toLowerCase().indexOf(value) > -1);
166
+ });
167
+ });
168
+
169
+
170
+ });
171
+
172
+ function loadPreviousMessages() {
173
+ $.ajax({
174
+ type: 'GET',
175
+ url: '/get-all-messages',
176
+ dataType: 'json',
177
+ success(data) {
178
+ data.forEach(message => displayMessage(message.content, message.role));
179
+ scrollToBottom();
180
+ },
181
+ error(error) {
182
+ console.error("Error fetching previous messages:", error);
183
+ }
184
+ });
185
+ }
186
+
187
+ function getStatusBadgeHTML(status) {
188
+ switch (status) {
189
+ case 'ongoing':
190
+ return `<span class="status-badge inline-block bg-yellow-300 rounded-full px-2 py-1 text-xs font-semibold text-gray-700 mr-2 ml-2">Ongoing</span>`;
191
+ case 'completed':
192
+ return `<span class="status-badge inline-block bg-green-300 rounded-full px-2 py-1 text-xs font-semibold text-gray-700 mr-2 ml-2">Completed</span>`;
193
+ case 'error':
194
+ return `<span class="status-badge inline-block bg-red-400 rounded-full px-2 py-1 text-xs font-semibold text-white mr-2 ml-2">Error</span>`;
195
+ default:
196
+ return `<span class="status-badge inline-block bg-gray-400 rounded-full px-2 py-1 text-xs font-semibold text-white mr-2 ml-2">Unknown</span>`;
197
+ }
198
+ }
199
+
200
+ function displayTaskWithStatus(taskDescription, status, skillType = "Unknown Skill", taskId, output = null) {
201
+ let statusBadgeHTML = getStatusBadgeHTML(status);
202
+
203
+ let skillBadgeHTML = '';
204
+ if (skillType) {
205
+ skillBadgeHTML = `<span class="inline-block bg-purple-300 rounded-full px-2 py-1 text-xs font-semibold text-gray-700 ml-2">${skillType}</span>`;
206
+ }
207
+
208
+ let outputHTML = output ? `<p>${output}</p>` : '<p><small>No output yet...</small></p>';
209
+
210
+ const taskHTML = `
211
+ <div class="task-item" data-task-id="${taskId}">
212
+ <span class="task-title">${taskDescription}</span></br>
213
+ <span class="toggle-output-icon" style="cursor: pointer;">▶</span>
214
+ ${statusBadgeHTML}${skillBadgeHTML}
215
+ <div class="task-output hidden">
216
+ ${outputHTML}
217
+ </div>
218
+ </div>`;
219
+ $("#task-list").prepend(taskHTML);
220
+ }
221
+
222
+
223
+ function updateTaskStatus(taskId, status, output) {
224
+ const $taskToUpdate = $(`#task-list > .task-item[data-task-id="${taskId}"]`);
225
+
226
+ // Remove the existing status badge
227
+ $taskToUpdate.find(".status-badge").remove();
228
+
229
+ // Insert the new status badge right before the skill badge
230
+ $taskToUpdate.find(".toggle-output-icon").after(getStatusBadgeHTML(status));
231
+
232
+ // Update output, if available
233
+ const $outputContainer = $taskToUpdate.find(".task-output");
234
+ if (output) {
235
+ $outputContainer.html(`<p>${output}</p>`);
236
+ } else {
237
+ $outputContainer.find("p").text("No output yet...");
238
+ }
239
+ }
240
+
241
+ function loadAllTasks() {
242
+ $.ajax({
243
+ type: 'GET',
244
+ url: '/get-all-tasks',
245
+ dataType: 'json',
246
+ success(data) {
247
+ console.log("Debug: Received tasks:", data); // Debug print
248
+ data.forEach(task => {
249
+ const description = task.description || '';
250
+ const status = task.status || '';
251
+ const skill_used = task.skill_used || '';
252
+ const task_id = task.task_id || '';
253
+ const output = task.output || null; // Get the output, if it exists, otherwise set to null
254
+ displayTaskWithStatus(description, status, skill_used, task_id, output);
255
+ });
256
+ },
257
+ error(error) {
258
+ console.error("Error fetching all tasks:", error);
259
+ }
260
+ });
261
+ }
262
+
263
+
264
+
265
+
266
+
267
+
268
+ $(document).on('click', '.toggle-output-icon', function() {
269
+ const $task = $(this).closest(".task-item");
270
+ const $output = $task.find(".task-output");
271
+ $output.toggleClass('hidden');
272
+ // Change the icon when the output is toggled
273
+ const icon = $(this);
274
+ if ($output.hasClass('hidden')) {
275
+ icon.text('▶'); // Icon indicating the output is hidden
276
+ } else {
277
+ icon.text('▼'); // Icon indicating the output is shown
278
+ }
279
+ });
babyagi/classic/babyfoxagi/public/static/style.css ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Subtle diagonal stripe overlay that sits behind the whole page. */
body::before {
  content: "";
  position: fixed;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  z-index: -1; /* keep the overlay behind all page content */
  background-image:
    linear-gradient(45deg, rgba(250, 243, 224, 0.1) 25%, transparent 25%, transparent 50%, rgba(250, 243, 224, 0.1) 50%, rgba(250, 243, 224, 0.1) 75%, transparent 75%, transparent);
  background-size: 40px 40px;
}

/* Page background: repeating beige checker pattern over white. */
body {
  background-color: #ffffff;
  opacity: 0.8;
  background-image: linear-gradient(135deg, #fff2ce 25%, transparent 25%), linear-gradient(225deg, #fff2ce 25%, transparent 25%), linear-gradient(45deg, #fff2ce 25%, transparent 25%), linear-gradient(315deg, #fff2ce 25%, #ffffff 25%);
  background-position: 10px 0, 10px 0, 0 0, 0 0;
  background-size: 10px 10px;
  background-repeat: repeat;
  font-family: 'Montserrat';
}

.chat-box {
  background-color: #faf3e0; /* Light beige */
}

.objectives-box {
  background-color: #faf3e0; /* Light beige */
}

/* Scrollable task list (merged from two duplicate #task-list rules). */
#task-list {
  background-color: #fff; /* White */
  overflow-y: auto;
  max-height: calc(100vh - 150px); /* Adjust based on header and padding */
}

.chat-messages {
  overflow-y: scroll;
  background-color: #fff; /* White; was missing its semicolon */
}

/* Style for user's speech bubble tail.
   NOTE(review): every border side is transparent, so this tail renders
   invisible — presumably one side should carry the bubble color; confirm. */
.bg-blue-200::before {
  content: "";
  position: absolute;
  top: 10px;
  right: -10px;
  border-width: 10px;
  border-style: solid;
  border-color: transparent transparent transparent;
}

/* Style for assistant's speech bubble tail.
   NOTE(review): all-transparent borders here too — tail is invisible; confirm. */
.bg-gray-300::before {
  content: "";
  position: absolute;
  top: 10px;
  left: -10px;
  border-width: 10px;
  border-style: solid;
  border-color: transparent transparent transparent;
}

.task-item {
  border: 1px solid #fffbe7;
  border-radius: 8px;
  margin-bottom: 10px;
  padding: 10px;
  position: relative;
}

.task-title {
  font-weight: bold;
}

/* Merged from two duplicate .task-output rules; the later rule's
   margin-top (5px) won in the cascade, so that value is kept. */
.task-output {
  background-color: #fffdfd; /* Near-white (comment said "Light gray") */
  padding: 10px;
  border-radius: 5px;
  margin-top: 5px;
}

.show-output .task-output {
  display: block;
}
babyagi/classic/babyfoxagi/pyproject.toml ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Poetry manifest for the BabyFoxAGI example (built on Replit's python-template).
[tool.poetry]
name = "python-template"
version = "0.1.0"
description = ""
authors = ["Your Name <you@example.com>"]

# Runtime dependencies.
# NOTE(review): 'tasks' and 'skills' resolve to unrelated PyPI packages;
# presumably the local tasks/ and skills/ directories were intended — confirm
# before relying on these pins.
[tool.poetry.dependencies]
python = ">=3.8.0,<3.9"
numpy = "^1.22.2"
replit = "^3.2.4"
Flask = "^2.2.0"
urllib3 = "^1.26.12"
openai = "^0.27.8"
tasks = "^2.8.0"
python-dotenv = "^1.0.0"
skills = "^0.3.0"
bs4 = "^0.0.1"
twilio = "^8.5.0"
google-api-python-client = "^2.97.0"
xds-protos = "^0.0.11"

# Development-only tooling (debugger and Replit's LSP server).
[tool.poetry.dev-dependencies]
debugpy = "^1.6.2"
replit-python-lsp-server = {extras = ["yapf", "rope", "pyflakes"], version = "^1.5.9"}

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
babyagi/classic/babyfoxagi/skills/__init__.py ADDED
File without changes
babyagi/classic/babyfoxagi/skills/airtable_search.py ADDED
@@ -0,0 +1,123 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import openai
3
+ import os
4
+ import requests
5
+ from urllib.parse import quote # Python built-in URL encoding function
6
+
7
# This Airtable skill is tuned to work with our Airtable setup, where we are
# searching a specific column in a specific table, in a specific base. All
# three placeholders below (base_id, table_name, filter_field) must be set.
class AirtableSearch(Skill):
    """Search our Airtable notes and summarize the records relevant to a task."""

    name = 'airtable_search'
    description = "A skill that retrieves data from our Airtable notes using a search. Useful for remembering who we talked to about a certain topic."
    # Keys this skill actually uses: 'airtable' for the REST request below and
    # 'openai' for text_completion_tool. (The original list named other skill
    # names instead of API keys, which made Skill validation meaningless.)
    api_keys_required = ['airtable', 'openai']

    def __init__(self, api_keys, main_loop_function):
        super().__init__(api_keys, main_loop_function)

    def execute(self, params, dependent_task_outputs, objective):
        """Generate one-word search queries for *params*, query Airtable, and
        accumulate a summary of relevant records.

        Returns the summary (prefixed with the queries tried), or None when an
        Airtable request fails.
        """
        if not self.valid:
            return

        # Queries already attempted, fed back so the model avoids repeats.
        tried_queries = []

        # Fold the dependent task output into the query-generation prompt.
        if dependent_task_outputs != "":
            dependent_task = f"Use the dependent task output below as reference to help craft the correct search query for the provided task above. Dependent task output:{dependent_task_outputs}."
        else:
            dependent_task = "."

        output = ''
        # Safety cap: the original `while not output` looped forever when no
        # query ever matched any record (output stayed empty).
        max_attempts = 5

        while not output and len(tried_queries) < max_attempts:
            # If there are tried queries, add them to the prompt.
            tried_queries_prompt = f" Do not include search queries we have tried, and instead try synonyms or misspellings. Search queries we have tried: {', '.join(tried_queries)}." if tried_queries else ""
            print(tried_queries_prompt)
            query = self.text_completion_tool(f"You are an AI assistant tasked with generating a one word search query based on the following task: {params}. Provide only the search query as a response. {tried_queries_prompt} Take into account output from the previous task:{dependent_task}.\nExample Task: Retrieve data from Airtable notes using the skill 'airtable_search' to find people we have talked to about TED AI.\nExample Query:TED AI\nExample Task:Conduct a search in our Airtable notes to identify investors who have expressed interest in climate.\nExample Query:climate\nTask:{params}\nQuery:")

            tried_queries.append(query)
            print("\033[90m\033[3m"+"Search query: " +str(query)+"\033[0m")

            airtable_api_key = self.api_keys['airtable']
            headers = {
                "Authorization": f"Bearer {airtable_api_key}",
                "Content-Type": "application/json"
            }

            # Airtable coordinates — replace these placeholders for your base.
            base_id = '<base_id>'
            table_name = '<table_name>'  # fixed: placeholder was missing its closing '>'

            # The field to filter on and the value to search for.
            filter_field = '<filter_field>'
            filter_value = query

            # URL-encode the field name for the fields[] query parameter.
            encoded_filter_field = quote(filter_field)

            # If filter_field contains a space, wrap it in curly brackets
            # (Airtable formula syntax for multi-word field names).
            formula_field = f'{{{filter_field}}}' if ' ' in filter_field else f'{filter_field}'

            # Construct the Airtable FIND() formula and URL-encode it.
            formula = f'"{filter_value}",{formula_field}'
            encoded_formula = quote(formula)

            url = f"https://api.airtable.com/v0/{base_id}/{table_name}?fields%5B%5D={encoded_filter_field}&filterByFormula=FIND({encoded_formula})"
            print(url)

            # Make the API request to retrieve the data from Airtable.
            response = requests.get(url, headers=headers)
            if response.status_code == 200:
                data = response.json()
                # Summarize each record, folding it into the running summary.
                for record in data['records']:
                    input_str = f"Your objective:{objective}\nYour task:{params}\nThe Airtable record:{record['fields']}"

                    if output == "":
                        instructions = ""
                    else:
                        instructions = f"Update the existing summary by adding information from this new record. ###Current summary:{output}."

                    # Renamed from `response` to stop shadowing the HTTP response above.
                    summary = self.text_completion_tool(f"You are an AI assistant that will review content from an Airtable record provided by the user, and extract only relevant information to the task at hand with context on why that information is relevant, taking into account the objective. If there is no relevant info, simply respond with '###'. Note that the Airtable note may use shorthand, and do your best to understand it first.{instructions} #####AIRTABLE DATA: {input_str}.")
                    print("\033[90m\033[3m" +str(summary)+"\033[0m")
                    output += summary + "\n####"
            else:
                print(f"Failed to retrieve data from Airtable. Status code: {response.status_code}")
                return None

        # Return the combined output, prefixed with the queries attempted.
        output = "Tried Queries: "+str(tried_queries)+"###Result:"+output
        return output

    def text_completion_tool(self, prompt: str):
        """Single-turn ChatCompletion helper for query generation and summarization."""
        messages = [
            {"role": "user", "content": prompt}
        ]
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=messages,
            temperature=0.2,
            max_tokens=350,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )

        return response.choices[0].message['content'].strip()
babyagi/classic/babyfoxagi/skills/call_babyagi.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import openai
3
+
4
# Experimental skill that lets BabyAGI invoke itself as a sub-agent, which
# really isn't necessary, and is also buggy.
class CallBabyagi(Skill):
    """Rewrite a task description into an objective and run the main loop on it."""

    name = 'call_babyagi'
    description = 'A skill that rewrites a task description into an objective, and sends it to itself (an autonomous agent) to complete. Helpful for research.'
    api_keys_required = [['openai']]

    def __init__(self, api_keys, main_loop_function):
        super().__init__(api_keys, main_loop_function)

    def execute(self, params, dependent_task_outputs, objective):
        # Turn the task description into a standalone objective...
        new_objective = self.generate_new_objective(params)
        # ...then hand it to the main loop with a minimal skill set and
        # return the loop's output as this skill's result.
        skills_to_load = ['web_search', 'text_completion']
        return self.main_loop_function(new_objective, skills_to_load, self.api_keys, False)

    def generate_new_objective(self, task_description):
        """Ask the chat model to rephrase *task_description* as an objective."""
        completion = openai.ChatCompletion.create(
            model="gpt-3.5-turbo-16k",
            messages=[{
                "role": "user",
                "content": f"You are an AI assistant tasked with rewriting the following task description into an objective, and remove any mention of call_babyagi: {task_description}\nObjective:",
            }],
            temperature=0.2,
            max_tokens=1500,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0,
        )
        return completion.choices[0].message['content'].strip()
babyagi/classic/babyfoxagi/skills/code_reader.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from skills.skill import Skill
2
+ import openai
3
+ import os
4
+
5
class CodeReader(Skill):
    """Locate a file inside this program's own directory tree (chosen by the
    chat model from a directory listing) and return its contents."""

    name = 'code_reader'
    description = "A skill that finds a file's location in it's own program's directory and returns its contents."
    api_keys_required = ['openai']

    def __init__(self, api_keys, main_loop_function):
        super().__init__(api_keys, main_loop_function)

    def execute(self, params, dependent_task_outputs, objective):
        """Ask the model which file *params* refers to, then read and return it.

        Returns the file's text, or None when the suggested path does not exist.
        """
        if not self.valid:
            return

        dir_structure = self.get_directory_structure(self.get_top_parent_path(os.path.realpath(__file__)))
        print(f"Directory structure: {dir_structure}")
        # One-shot example teaching the model the expected response format:
        # a bare file path, nothing else.
        example_dir_structure = {'.': {'main.py': None}, 'skills': {'__init__.py': None, 'web_scrape.py': None, 'skill.py': None, 'test_skill.py': None, 'text_completion.py': None, 'web_search.py': None, 'skill_registry.py': None, 'directory_structure.py': None, 'code_reader.py': None}, 'tasks': {'task_registry.py': None}, 'output': {}}
        example_params = "Analyze main.py"
        example_response = "main.py"

        task_prompt = f"Find a specific file in a directory and return only the file path, based on the task description below. Always return a directory.###The directory structure is as follows: \n{example_dir_structure}\nYour task: {example_params}\n###\nRESPONSE:{example_response} ###The directory structure is as follows: \n{dir_structure}\nYour task: {params}\n###\nRESPONSE:"

        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": task_prompt}
        ]
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=0.2,
            max_tokens=1500,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        file_path = response.choices[0].message['content'].strip()
        print(f"AI suggested file path: {file_path}")

        try:
            with open(file_path, 'r') as file:
                file_content = file.read()
            return file_content
        except FileNotFoundError:
            print("File not found. Please check the AI's suggested file path.")
            return None

    def get_directory_structure(self, start_path):
        """Build a nested dict of .py files under *start_path*.

        Directories are dicts; files map to None. Hidden files, non-Python
        files, and the ignore list below are excluded.
        """
        dir_structure = {}
        ignore_dirs = ['.','__init__.py', '__pycache__', 'pydevd', 'poetry','venv'] # add any other directories to ignore here

        for root, dirs, files in os.walk(start_path):
            dirs[:] = [d for d in dirs if not any(d.startswith(i) for i in ignore_dirs)] # exclude specified directories (in-place so os.walk skips them)
            files = [f for f in files if not f[0] == '.' and f.endswith('.py')] # exclude hidden files and non-Python files

            # Descend into the nested dict along the path relative to start_path.
            current_dict = dir_structure
            path_parts = os.path.relpath(root, start_path).split(os.sep)
            for part in path_parts:
                if part: # skip empty parts
                    if part not in current_dict:
                        current_dict[part] = {}
                    current_dict = current_dict[part]
            for f in files:
                current_dict[f] = None

        return dir_structure

    def get_top_parent_path(self, current_path):
        """Walk up from *current_path* toward the project root.

        Returns the hard-coded Replit project root once its skills directory
        is reached. Fixed: the original looped forever when that hard-coded
        path never appeared (os.path.dirname('/') == '/'); we now stop once
        dirname() makes no progress and return the topmost path reached.
        """
        while True:
            new_path = os.path.dirname(current_path)
            if new_path == '/home/runner/BabyfoxAGIUI/skills': # reached the top
                return '/home/runner/BabyfoxAGIUI'
            if new_path == current_path: # filesystem root: no further parents
                return current_path
            current_path = new_path
+ return relative_path