use_wandb: False # Set to True to log the optimization run to Weights & Biases
dataset:
    name: 'dataset'
    records_path: null
    initial_dataset: ''
    label_schema: ["Yes", "No"]
    max_samples: 10
    semantic_sampling: False # Set to True only if you are not on an Apple M1; faiss currently has an issue on M1
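# Example (hypothetical values, not from the original file): a three-class
# schema for sentiment classification would be configured as:
#     label_schema: ["Positive", "Negative", "Neutral"]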
annotator:
    method: 'argilla'
    config:
        api_url: 'https://kenken999-arglira.hf.space'
        api_key: '12345678'
        workspace: 'team'
        time_interval: 5
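# Sketch of an alternative annotator, assuming the framework also supports an
# LLM-backed annotator in place of the human Argilla loop; the keys below
# mirror the predictor block and are illustrative, not taken from verified docs:
#     annotator:
#         method: 'llm'
#         config:
#             llm:
#                 type: 'OpenAI'
#                 name: 'llama3-70b-8192'
#             num_workers: 5
#             mode: 'annotation'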
predictor:
    method: 'llm'
    config:
        llm:
            type: 'OpenAI'
            name: 'llama3-70b-8192'
#            async_params:
#                retry_interval: 10
#                max_retries: 2
            model_kwargs: {"seed": 220}
        num_workers: 5
        prompt: 'prompts/predictor_completion/prediction.prompt'
        mini_batch_size: 1 # Change to >1 to include multiple samples in one prompt
        mode: 'prediction'
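# Note: 'llama3-70b-8192' is a Groq-hosted model reached here through an
# OpenAI-compatible client (type: 'OpenAI'), so the client's API key and
# base-URL environment variables must point at that endpoint. The fixed seed
# in model_kwargs is forwarded to the model to make predictions more
# reproducible across runs.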
meta_prompts:
    folder: 'prompts/meta_prompts_classification'
    num_err_prompt: 1 # Number of error examples per sample in the prompt generation
    num_err_samples: 2 # Number of error examples per sample in the sample generation
    history_length: 4 # Number of samples in the meta-prompt history
    num_generated_samples: 10 # Number of generated samples at each iteration
    num_initialize_samples: 10 # Number of generated samples at iteration 0, in the zero-shot case
    samples_generation_batch: 10 # Number of samples generated in one call to the LLM
    num_workers: 5 # Number of parallel workers
    warmup: 4 # Number of warmup steps
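# Budget arithmetic implied by the values above: iteration 0 issues
# ceil(num_initialize_samples / samples_generation_batch) = ceil(10/10) = 1
# generation call; each later iteration likewise generates its 10 samples in a
# single call, with up to num_workers = 5 calls running in parallel whenever
# more than one batch is needed.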
eval:
    function_name: 'accuracy'
    num_large_errors: 4
    num_boundary_predictions: 0
    error_threshold: 0.5
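# Hedged reading of the keys above (inferred from the names, not verified
# docs): predictions are scored with 'accuracy'; the num_large_errors worst
# mistakes, plus (if > 0) num_boundary_predictions cases near error_threshold,
# are presumably surfaced for the error analysis that feeds the next iteration.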
llm:
    type: 'OpenAI'
    name: 'llama3-70b-8192'
    temperature: 0.8
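# This top-level llm block presumably drives the meta-prompt generation step;
# temperature: 0.8 trades determinism for more diverse candidate prompts.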
stop_criteria:
    max_usage: 2 # In $ for OpenAI models, otherwise number of tokens
    patience: 10 # Number of patience steps
    min_delta: 0.01 # Minimum improvement that counts as progress
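# Stopping rule implied by the values above: the run halts once estimated
# usage exceeds $2 (for OpenAI-style models), or after 10 consecutive
# iterations (patience) in which the best score improves by less than 0.01.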