Skip to content

Commit

Permalink
Fix for empty LoRA+ learning-rate embedding value (axolotl-ai-cloud#1932)
Browse files Browse the repository at this point in the history
  • Loading branch information
winglian authored Sep 27, 2024
1 parent b98d7d7 commit 61aa291
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 1 deletion.
2 changes: 1 addition & 1 deletion src/axolotl/core/trainer_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -456,7 +456,7 @@ def create_optimizer(self):
if self.args.loraplus_lr_ratio is not None:
loraplus_lr_ratio = getattr(self.args, "loraplus_lr_ratio", None)
loraplus_lr_embedding = getattr(
self.args, "loraplus_lr_embedding", None
self.args, "loraplus_lr_embedding", 1e-6
)
self.optimizer = create_loraplus_optimizer( # pylint: disable=attribute-defined-outside-init
opt_model,
Expand Down
7 changes: 7 additions & 0 deletions src/axolotl/utils/config/models/input/v0_4_1/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -298,6 +298,13 @@ def validate_qlora(self):
raise ValueError("Require cfg.load_in_4bit to be True for qlora")
return self

@field_validator("loraplus_lr_embedding")
@classmethod
def convert_loraplus_lr_embedding(cls, loraplus_lr_embedding):
    """Coerce a non-empty string ``loraplus_lr_embedding`` to ``float``.

    YAML/config values may arrive as strings (e.g. ``"1e-6"``); convert
    them so downstream optimizer code receives a numeric learning rate.
    Empty strings and non-string values pass through unchanged.
    """
    if isinstance(loraplus_lr_embedding, str) and loraplus_lr_embedding:
        return float(loraplus_lr_embedding)
    return loraplus_lr_embedding


class ReLoRAConfig(BaseModel):
"""ReLoRA configuration subset"""
Expand Down

0 comments on commit 61aa291

Please sign in to comment.