Commit eec2690

Merge pull request #150 from georgian-io/fix-generate
Bug Fix - `llmtune generate config` not finding the yml file
2 parents 98e1ad4 + 20be6e2

File tree: 2 files changed, +4 −4 lines


llmtune/cli/toolkit.py (+1, −1)
```diff
@@ -126,7 +126,7 @@ def generate_config():
     """
     Generate an example `config.yml` file in current directory
     """
-    module_path = Path(llmtune.__file__).parent
+    module_path = Path(llmtune.__file__)
     example_config_path = module_path.parent / EXAMPLE_CONFIG_FNAME
     destination = Path.cwd()
     shutil.copy(example_config_path, destination)
```
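
For context on why this one-line change fixes the lookup: `config.yml` now ships inside the `llmtune` package itself (see the rename below), so the example path must resolve one directory lower than before. A minimal sketch of the two resolutions, assuming a standard site-packages install and assuming `EXAMPLE_CONFIG_FNAME` is `"config.yml"` (the constant's value is not shown in this diff):

```python
from pathlib import Path

import llmtune  # llmtune.__file__ -> .../site-packages/llmtune/__init__.py

EXAMPLE_CONFIG_FNAME = "config.yml"  # assumed value; the diff only shows the name

# Before the fix: .parent applied twice climbs out of the package.
old_path = Path(llmtune.__file__).parent.parent / EXAMPLE_CONFIG_FNAME
# -> .../site-packages/config.yml  (nothing is installed there, so the copy failed)

# After the fix: a single effective .parent stays inside the package,
# matching the file's new location at llmtune/config.yml.
new_path = Path(llmtune.__file__).parent / EXAMPLE_CONFIG_FNAME
# -> .../site-packages/llmtune/config.yml
```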

config.yml → llmtune/config.yml (renamed, +3, −3)
```diff
@@ -17,15 +17,15 @@ data:
   prompt_stub:
     >- # Stub to add for training at the end of prompt, for test set or inference, this is omitted; make sure only one variable is present
     {output}
-  test_size: 0.1 # Proportion of test as % of total; if integer then # of samples
-  train_size: 0.9 # Proportion of train as % of total; if integer then # of samples
+  test_size: 25 # Proportion of test as % of total; if integer then # of samples
+  train_size: 500 # Proportion of train as % of total; if integer then # of samples
   train_test_split_seed: 42
 
 # Model Definition -------------------
 model:
   hf_model_ckpt: "mistralai/Mistral-7B-Instruct-v0.2"
   torch_dtype: "bfloat16"
-  attn_implementation: "flash_attention_2"
+  #attn_implementation: "flash_attention_2"
   quantize: true
   bitsandbytes:
     load_in_4bit: true
```
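
The size comments describe the usual scikit-learn `train_test_split` convention: a float is a proportion of the dataset, an integer is an absolute number of samples. Whether llmtune delegates to scikit-learn is not shown in this diff, but a minimal sketch of that convention with the new example values:

```python
from sklearn.model_selection import train_test_split

rows = list(range(1000))  # hypothetical 1000-example dataset

# Integers request absolute sample counts (floats would be proportions).
train, test = train_test_split(
    rows, train_size=500, test_size=25, random_state=42
)
print(len(train), len(test))  # 500 25
```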
