oceansweep committed on
Commit
a2369d8
1 Parent(s): c5b0bb7

Update config.txt

Browse files
Files changed (1) hide show
  1. config.txt +47 -20
config.txt CHANGED
@@ -17,26 +17,33 @@ mistral_model = mistral-large-latest
17
  mistral_api_key = <mistral_api_key>
18
 
19
  [Local-API]
20
- kobold_api_key = <kobold api key>
21
  kobold_api_IP = http://127.0.0.1:5001/api/v1/generate
22
- llama_api_key = <llama.cpp api key>
23
  llama_api_IP = http://127.0.0.1:8080/completion
24
- ooba_api_key = <ooba api key>
 
25
  ooba_api_IP = http://127.0.0.1:5000/v1/chat/completions
26
  tabby_api_IP = http://127.0.0.1:5000/v1/chat/completions
27
- tabby_api_key = <tabbyapi key>
28
  vllm_api_IP = http://127.0.0.1:8000/v1/chat/completions
29
-
30
- [Paths]
31
- output_path = Results
32
- logging_file = Logs
33
-
 
34
 
35
  [Processing]
36
  processing_choice = cuda
37
 
38
  [Settings]
39
- max_tokens = 1000
 
 
 
 
 
 
40
 
41
  [Prompts]
42
  prompt_sample = "What is the meaning of life?"
@@ -44,25 +51,45 @@ video_summarize_prompt = "Above is the transcript of a video. Please read throug
44
 
45
  [Database]
46
  type = sqlite
47
- sqlite_path = media_summary.db
 
 
48
  elasticsearch_host = localhost
49
  elasticsearch_port = 9200
50
  # Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
51
- chroma_db_path = chroma_db
 
 
 
52
 
53
  [Embeddings]
54
- provider = openai
55
- # Can be 'openai', 'local', or 'huggingface'
56
- model = text-embedding-3-small
57
- # Model name or path
58
- api_key = your_api_key_here
59
- api_url = http://localhost:8080/v1/embeddings
60
- # Only needed for 'local' provider
 
 
 
 
61
 
62
  [Chunking]
63
  method = words
 
64
  max_size = 400
65
  overlap = 200
66
  adaptive = false
 
67
  multi_level = false
68
- language = english
 
 
 
 
 
 
 
 
 
 
17
  mistral_api_key = <mistral_api_key>
18
 
19
  [Local-API]
 
20
  kobold_api_IP = http://127.0.0.1:5001/api/v1/generate
21
+ kobold_api_key =
22
  llama_api_IP = http://127.0.0.1:8080/completion
23
+ llama_api_key =
24
+ ooba_api_key =
25
  ooba_api_IP = http://127.0.0.1:5000/v1/chat/completions
26
  tabby_api_IP = http://127.0.0.1:5000/v1/chat/completions
27
+ tabby_api_key =
28
  vllm_api_IP = http://127.0.0.1:8000/v1/chat/completions
29
+ vllm_model =
30
+ ollama_api_IP = http://127.0.0.1:11434/v1/chat/completions
31
+ ollama_api_key =
32
+ ollama_model = llama3
33
+ aphrodite_api_IP = http://127.0.0.1:8080/completion
34
+ aphrodite_api_key =
35
 
36
  [Processing]
37
  processing_choice = cuda
38
 
39
  [Settings]
40
+ chunk_duration = 30
41
+ words_per_second = 3
42
+
43
+ [Auto-Save]
44
+ save_character_chats = False
45
+ save_rag_chats = False
46
+
47
 
48
  [Prompts]
49
  prompt_sample = "What is the meaning of life?"
 
51
 
52
  [Database]
53
  type = sqlite
54
+ sqlite_path = Databases/media_summary.db
55
+ backup_path = ./tldw_DB_Backups/
56
+ # Path to the backup location for the database. If the path does not exist, the backup will not be created.
57
  elasticsearch_host = localhost
58
  elasticsearch_port = 9200
59
  # Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
60
+ chroma_db_path = Databases/chroma_db
61
+ prompts_db_path = Databases/prompts.db
62
+ rag_qa_db_path = Databases/RAG_QA_Chat.db
63
+ character_db_path = Databases/chatDB.db
64
 
65
  [Embeddings]
66
+ embedding_provider = openai
67
+ embedding_model = text-embedding-3-small
68
+ onnx_model_path = ./App_Function_Libraries/models/onnx_models/
69
+ model_dir = ./App_Function_Libraries/models/embedding_models
70
+ embedding_api_url = http://localhost:8080/v1/embeddings
71
+ embedding_api_key = your_api_key_here
72
+ chunk_size = 400
73
+ overlap = 200
74
+ # 'embedding_provider' Can be 'openai', 'local', or 'huggingface'
75
+ # `embedding_model` Set to the model name you want to use for embeddings. For OpenAI, this can be 'text-embedding-3-small', or 'text-embedding-3-large'.
76
+ # huggingface: model = dunzhang/stella_en_400M_v5
77
 
78
  [Chunking]
79
  method = words
80
+ # 'method' Can be 'words' / 'sentences' / 'paragraphs' / 'semantic' / 'tokens'
81
  max_size = 400
82
  overlap = 200
83
  adaptive = false
84
+ # Use nltk's punkt tokenizer to split text into sentences, then identify the average sentence length and set that as the chunk size
85
  multi_level = false
86
+ language = english
87
+
88
+ [Metrics]
89
+ log_file_path =
90
+ #os.getenv("tldw_LOG_FILE_PATH", "tldw_app_logs.json")
91
+ max_bytes =
92
+ #int(os.getenv("tldw_LOG_MAX_BYTES", 10 * 1024 * 1024)) # 10 MB
93
+ backup_count = 5
94
+ #int(os.getenv("tldw_LOG_BACKUP_COUNT", 5))
95
+