From 4e853b184310af25d58238d48d98548dde8e9044 Mon Sep 17 00:00:00 2001
From: Chirag Jain
Date: Sun, 8 Dec 2024 09:24:36 +0000
Subject: [PATCH] Make the learning rate configurable and adopt --eval_strategy

Read the learning rate from the launch parameters in the notebook
instead of hardcoding 0.0002, add a matching learning_rate field
(default 0.0001) to Config in utils.py, raise the reporting default
learning rate from 1e-5 to 1e-4, and rename the deprecated
--evaluation_strategy flag to --eval_strategy.
---
 finetune.ipynb | 4 ++--
 reporting.py   | 4 ++--
 utils.py       | 1 +
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/finetune.ipynb b/finetune.ipynb
index bf24fcd..e75d7c7 100644
--- a/finetune.ipynb
+++ b/finetune.ipynb
@@ -265,7 +265,7 @@
 "micro_batch_size = launch_parameters.batch_size\n",
 "\n",
 "# Learning rate\n",
-"learning_rate = 0.0002\n",
+"learning_rate = launch_parameters.learning_rate\n",
 "\n",
 "# How many epochs to run training for\n",
 "num_epochs = 10\n",
@@ -394,7 +394,7 @@
 "--lora_alpha {lora_alpha} \\\n",
 "--lora_dropout 0.05 \\\n",
 "--logging_steps 5 \\\n",
-"--evaluation_strategy steps \\\n",
+"--eval_strategy steps \\\n",
 "--eval_steps {eval_steps} \\\n",
 "--save_strategy steps \\\n",
 "--save_steps {save_steps} \\\n",
diff --git a/reporting.py b/reporting.py
index 289edcf..989b6ec 100644
--- a/reporting.py
+++ b/reporting.py
@@ -55,13 +55,13 @@ class ReportingConfig(BaseModel):
 --max_steps 3
 --gradient_accumulation_steps 4
 --gradient_checkpointing unsloth
---learning_rate 0.00001
+--learning_rate 0.0001
 --output_dir ./outputs
 --train_on_inputs False
 --logging_steps 1
 --save_strategy steps
 --save_steps 0.5
---evaluation_strategy steps
+--eval_strategy steps
 --eval_steps 0.5
 --adapter qlora
 --lora_target_linear True
diff --git a/utils.py b/utils.py
index 2ebffaf..d67f78f 100644
--- a/utils.py
+++ b/utils.py
@@ -97,6 +97,7 @@ class Config:
     model_id: str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
     max_length: Optional[int] = 2048
     batch_size: int = 1
+    learning_rate: float = 0.0001
 
 
 def load_launch_parameters(path):
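
A minimal sketch of how the new learning_rate field is expected to flow
from utils.py into the notebook. It assumes Config is a dataclass and
that load_launch_parameters reads a JSON file whose keys match the
Config fields; the real loader in utils.py is not shown in this patch,
so treat the parsing details below as hypothetical.

    from dataclasses import dataclass, fields
    import json
    from typing import Optional


    @dataclass
    class Config:
        model_id: str = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        max_length: Optional[int] = 2048
        batch_size: int = 1
        learning_rate: float = 0.0001


    def load_launch_parameters(path):
        # Hypothetical: assumes launch parameters are stored as JSON with
        # keys matching the Config fields; unknown keys are dropped so older
        # files without "learning_rate" still load and pick up the default.
        with open(path) as f:
            raw = json.load(f)
        names = {f.name for f in fields(Config)}
        return Config(**{k: v for k, v in raw.items() if k in names})


    # The notebook then replaces the hardcoded value:
    # launch_parameters = load_launch_parameters("launch_parameters.json")
    # learning_rate = launch_parameters.learning_rate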
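
The --evaluation_strategy to --eval_strategy rename tracks the same
rename in recent transformers releases, where TrainingArguments
deprecated evaluation_strategy in favor of eval_strategy. A minimal
sketch of the Trainer-level flags from reporting.py expressed directly
as TrainingArguments (axolotl-specific flags such as --adapter are
omitted; values are taken from the config above):

    from transformers import TrainingArguments

    args = TrainingArguments(
        output_dir="./outputs",
        max_steps=3,
        gradient_accumulation_steps=4,
        learning_rate=0.0001,
        logging_steps=1,
        save_strategy="steps",
        save_steps=0.5,        # a float below 1 means a ratio of total steps
        eval_strategy="steps",  # post-rename spelling of evaluation_strategy
        eval_steps=0.5,
    )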