Commit
fix xcomposer lora_target_modules (modelscope#1645)
Jintao-Huang committed Aug 8, 2024
1 parent 62aea6d commit d7e0e10
Showing 3 changed files with 21 additions and 7 deletions.
swift/llm/utils/argument.py (4 additions, 0 deletions)
@@ -830,6 +830,10 @@ def _prepare_target_modules(self, target_modules) -> Union[List[str], str]:
target_modules.remove('DEFAULT')
default_lora_tm = get_default_lora_target_modules(self.model_type)
if isinstance(default_lora_tm, str):
# Make sure the regex can find all linear in the module.
from swift.tuners.peft import _create_and_replace_hook2
from peft import LoraModel
LoraModel._create_and_replace = _create_and_replace_hook2
return default_lora_tm
target_modules += default_lora_tm
if 'EMBEDDING' in target_modules:
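Note on the added branch: when get_default_lora_target_modules returns a single regex string (the get_regex_for_mm_default_lora defaults in model.py below), the pattern typically matches every submodule of a block, not only its Linear layers, so the branch installs _create_and_replace_hook2 on LoraModel to filter the matches. A small self-contained sketch of the underlying issue, using an assumed illustrative pattern rather than swift's actual defaults:

import re
import torch.nn as nn

# Assumed illustrative pattern; swift's real regex defaults come from
# get_regex_for_mm_default_lora and differ per model type.
pattern = r'.*feed_forward.*'

block = nn.ModuleDict({
    'feed_forward': nn.ModuleDict({
        'w1': nn.Linear(8, 16),
        'norm': nn.LayerNorm(8),
    })
})
matched = [name for name, _ in block.named_modules()
           if re.fullmatch(pattern, name)]
print(matched)  # ['feed_forward', 'feed_forward.w1', 'feed_forward.norm']

With the hook installed, peft still sees all of these regex matches, but only the Linear-like targets end up wrapped with tuner layers (see the peft.py changes below).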
swift/llm/utils/model.py (1 addition, 1 deletion)
@@ -514,7 +514,7 @@ class LoRATM(NamedTuple):
llava_llama = f'{get_regex_for_mm_default_lora("llava_llama")}'
llava = f'{get_regex_for_mm_default_lora("llava")}'
yi_vl = f'{get_regex_for_mm_default_lora("yi_vl")}'
internlm_xcomposer = f'{get_regex_for_mm_default_lora("internlm_xcomposer")}'
internlm_xcomposer = ['attention.wqkv', 'attention.wo', 'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
internvl = f'{get_regex_for_mm_default_lora("internvl")}'
deepseek_vl = f'{get_regex_for_mm_default_lora("deepseek_vl")}'
paligemma = f'{get_regex_for_mm_default_lora("paligemma")}'
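The model.py change swaps the internlm_xcomposer default from a regex string to an explicit list of module-name suffixes. peft interprets a string target_modules as a regex over the full module name and a list as name suffixes; a rough sketch of that distinction, using a hypothetical helper that is neither swift nor peft code:

import re
from typing import List, Union

def matches_target(module_name: str, target_modules: Union[str, List[str]]) -> bool:
    # Rough approximation of peft's target matching: full regex match for a
    # string, suffix match for a list of module names.
    if isinstance(target_modules, str):
        return re.fullmatch(target_modules, module_name) is not None
    return any(module_name == t or module_name.endswith('.' + t)
               for t in target_modules)

xcomposer_targets = ['attention.wqkv', 'attention.wo',
                     'feed_forward.w1', 'feed_forward.w2', 'feed_forward.w3']
print(matches_target('model.layers.0.attention.wqkv', xcomposer_targets))  # True
print(matches_target('model.layers.0.attention_norm', xcomposer_targets))  # False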
swift/tuners/peft.py (16 additions, 6 deletions)
@@ -77,7 +77,7 @@ def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional
return self


def _create_and_replace_hook(self, *args, **kwargs):
def _get_target(*args, **kwargs):
target = None
if 'target' in kwargs:
target = kwargs['target']
@@ -86,10 +86,20 @@ def _create_and_replace_hook(self, *args, **kwargs):
if isinstance(arg, torch.nn.Module):
target = arg
break
return target


def _create_and_replace_hook(self, *args, **kwargs):
target = _get_target(*args, **kwargs)
if target and target.__class__.__name__ == 'NonDynamicallyQuantizableLinear':
return

return self._create_and_replace_origin(*args, **kwargs)


def _create_and_replace_hook2(self, *args, **kwargs):
target = _get_target(*args, **kwargs)

all_supported_names = ('linear', )
all_supported_types = (torch.nn.Embedding, torch.nn.Conv2d, transformers.pytorch_utils.Conv1D)

@@ -100,7 +110,7 @@ def _create_and_replace_hook(self, *args, **kwargs):
for name in all_supported_names]) and not any([isinstance(target, type) for type in all_supported_types])):
return

return self._create_and_replace_origin(*args, **kwargs)
return _create_and_replace_hook(self, *args, **kwargs)


def _convert_dtype(target: torch.nn.Module, adapter_name: str, lora_dtype: str):
@@ -291,14 +301,14 @@ def hot_patch_peft_module():
LoraModel._create_and_replace_origin = LoraModel._create_and_replace
LoraModel._create_and_replace = _create_and_replace_hook
VeraModel._create_and_replace_origin = VeraModel._create_and_replace
VeraModel._create_and_replace = _create_and_replace_hook
VeraModel._create_and_replace = _create_and_replace_hook2
BOFTModel._create_and_replace_origin = BOFTModel._create_and_replace
BOFTModel._create_and_replace = _create_and_replace_hook
BOFTModel._create_and_replace = _create_and_replace_hook2
IA3Model._create_and_replace_origin = IA3Model._create_and_replace
IA3Model._create_and_replace = _create_and_replace_hook
IA3Model._create_and_replace = _create_and_replace_hook2
if FourierFTModel is not None:
FourierFTModel._create_and_replace_origin = FourierFTModel._create_and_replace
FourierFTModel._create_and_replace = _create_and_replace_hook
FourierFTModel._create_and_replace = _create_and_replace_hook2

# Support type conversion
def init(self, model: torch.nn.Module, config: Dict[str, LoraConfig], adapter_name):
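Taken together: _get_target extracts the candidate module from peft's _create_and_replace arguments, _create_and_replace_hook skips torch's NonDynamicallyQuantizableLinear, and _create_and_replace_hook2 additionally skips anything that is neither Linear-like nor one of a few whitelisted layer types before delegating to the original method. A condensed sketch of that filter, with an assumed helper name rather than the repository's code:

import torch
import transformers

SUPPORTED_NAMES = ('linear', )
SUPPORTED_TYPES = (torch.nn.Embedding, torch.nn.Conv2d,
                   transformers.pytorch_utils.Conv1D)

def should_wrap(target: torch.nn.Module) -> bool:
    # Wrap only Linear-like modules or explicitly supported layer types, and
    # never NonDynamicallyQuantizableLinear (e.g. the out_proj created by
    # torch.nn.MultiheadAttention).
    cls_name = target.__class__.__name__
    if cls_name == 'NonDynamicallyQuantizableLinear':
        return False
    return (any(n in cls_name.lower() for n in SUPPORTED_NAMES)
            or isinstance(target, SUPPORTED_TYPES))

print(should_wrap(torch.nn.Linear(4, 4)))      # True
print(should_wrap(torch.nn.LayerNorm(4)))      # False
print(should_wrap(torch.nn.Embedding(10, 4)))  # True

hot_patch_peft_module now installs _create_and_replace_hook2 on VeraModel, BOFTModel, IA3Model and FourierFTModel, while LoraModel keeps the lighter hook unless the regex branch in argument.py opts it in.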
