-
Notifications
You must be signed in to change notification settings - Fork 11
/
experiment.yaml
66 lines (51 loc) · 1.83 KB
/
experiment.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
# This file contains three parts: data, llm, and grimoire_generator

# ─── Tasks To Be Evaluated On ─────────────────────────────────────────────────
# Note:
#   * Only string values are allowed
data:
  - "SST5"
  - "Subj"
  - "AgNews"
  - "TREC"
  - "RTE"
  - "QNLI"
  - "hate_speech18"
  - "ethos"

# ─── Models To Be Evaluated ───────────────────────────────────────────────────
# There are three types of models: api, local, and remote
# Configure models you need and their corresponding parameters under each type
# Use ~ if there is no parameter to be configured
# Note:
#   * For API models, you need to apply for an API key for each model.
#   * For Local models, you need to deploy them first using vLLM.
#   * For Remote models, they are only for internal use currently.
llm:
  # No API models enabled; explicit ~ (null) instead of a bare `api:` key,
  # per this file's own convention and yamllint's empty-values rule.
  api: ~
  # Example API model entry — uncomment and fill in to enable:
  # - GPT:
  #     model_name: gpt-3.5-turbo
  #     report: true
  local:
    - LLaMA2_70B_Chat: ~
    - LLaMA2_13B_Chat: ~
    - Baichuan2_7B_Chat: ~
    - PHI2: ~
  remote:
    # NOTE(review): parameters are nested under each model-name key, matching
    # the commented GPT example above and the llm_params stanza below —
    # original indentation was lost in extraction; confirm against the loader.
    - GPT_transit:
        model_name: gpt-3.5-turbo
        report: true
    - GPT_transit:
        model_name: gpt-4-1106-preview
        report: true
  hf:
    - AquilaChat2_7B: ~

# ─── The Model Used To Generate Grimoire ──────────────────────────────────────
# Note:
#   * One and only one model is allowed
#   * Any type of model is allowed
grimoire_generator:
  llm_type: remote
  llm: GPT_transit
  llm_params:
    model_name: gpt-4-1106-preview
    max_new_tokens: 1024
    report: true