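# Configuration for an automatic prompt-optimization run on a binary (Yes/No)
# classification task.
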
use_wandb: False # Set to True to log runs to Weights & Biases
dataset:
    name: 'dataset'
    records_path: null
    initial_dataset: ''
    label_schema: ["Yes", "No"]
    max_samples: 10
    semantic_sampling: False # Set to True if you are not on an Apple M1; faiss currently has a compatibility issue on M1


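# Source of ground-truth labels. With method 'argilla', samples are presumably
# pushed to the Argilla instance at api_url and human labels are polled every
# time_interval seconds.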
annotator:
    method: 'argilla'
    config:
        api_url: 'https://kenken999-arglira.hf.space'
        api_key: '12345678'
        workspace: 'team'
        time_interval: 5

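# Model that generates a prediction for every sample. The model ID
# 'llama3-70b-8192' matches Groq's Llama 3 70B, so type 'OpenAI' is presumably
# an OpenAI-compatible client pointed at that endpoint.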
predictor:
    method: 'llm'
    config:
        llm:
            type: 'OpenAI'
            name: 'llama3-70b-8192'
#            async_params:
#                retry_interval: 10
#                max_retries: 2
            model_kwargs: {"seed": 220} # fixed seed for more reproducible generations
        num_workers: 5
        prompt: 'prompts/predictor_completion/prediction.prompt'
        mini_batch_size: 1  # change to >1 to include multiple samples in one prompt
        mode: 'prediction'

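# Meta-prompt stage: how candidate prompts and synthetic samples are generated
# at each optimization iteration.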
meta_prompts:
    folder: 'prompts/meta_prompts_classification'
    num_err_prompt: 1  # Number of error examples per sample in the prompt generation
    num_err_samples: 2 # Number of error examples per sample in the sample generation
    history_length: 4 # Number of samples in the meta-prompt history
    num_generated_samples: 10 # Number of generated samples at each iteration
    num_initialize_samples: 10 # Number of generated samples at iteration 0, in zero-shot case
    samples_generation_batch: 10 # Number of samples generated in one call to the LLM
    num_workers: 5 # Number of parallel workers
    warmup: 4 # Number of warmup steps

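# Scoring of candidate prompts. num_large_errors presumably controls how many
# high-error examples are surfaced for the next meta-prompt iteration.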
eval:
    function_name: 'accuracy'
    num_large_errors: 4
    num_boundary_predictions: 0
    error_threshold: 0.5

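# LLM presumably used for the optimization (meta-prompt) calls themselves,
# configured separately from the predictor's LLM above.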
llm:
    type: 'OpenAI'
    name: 'llama3-70b-8192'
    temperature: 0.8

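# Early-stopping rules for the optimization loop.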
stop_criteria:
    max_usage: 2 # In $ for OpenAI models, otherwise in number of tokens
    patience: 10 # Number of steps without improvement before stopping
    min_delta: 0.01 # Minimum improvement that counts as progress
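
# A minimal sketch of loading this file from Python (hypothetical; the
# project's real entry point and config loader may differ):
#
#   import yaml  # requires PyYAML
#
#   with open('config.yml') as f:  # the filename here is an assumption
#       cfg = yaml.safe_load(f)
#   print(cfg['predictor']['config']['llm']['name'])  # -> 'llama3-70b-8192'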