src/peft/tuners/lora/layer.py (14 changes: 8 additions & 6 deletions)
@@ -785,14 +785,16 @@ def __init__(
lora_bias=lora_bias,
)

def resolve_lora_variant(self, *, use_dora: bool, **kwargs) -> Optional[LoraVariant]:
if not use_dora:
def resolve_lora_variant(self, *, use_dora: bool, use_sine_lora:bool,**kwargs) -> Optional[LoraVariant]:
if use_dora:
from .variants import DoraEmbeddingVariant
return DoraEmbeddingVariant()
elif use_sine_lora:
from .variants import SineLoraLinearVariant
return SineLoraLinearVariant()
Collaborator:

This should reference the (not yet existing) SineLoraEmbeddingVariant since we're in the Embedding class.

But this code is good for Linear.resolve_lora_variant :) You can use it there!

Effectively every class that overrides modules in the model (Linear, Embedding, Conv2d, ...) needs its own variant implementation and resolve_lora_variant implementation, but we can keep it at Linear and Embedding for now if you want.
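For illustration, a minimal sketch of how the same branching could look in Linear.resolve_lora_variant (a sketch only, assuming the DoRA branch keeps using the existing DoraLinearVariant):

    def resolve_lora_variant(self, *, use_dora: bool, use_sine_lora: bool, **kwargs) -> Optional[LoraVariant]:
        # In the Linear class: same dispatch as in Embedding above,
        # but returning the Linear-specific variants.
        if use_dora:
            from .variants import DoraLinearVariant
            return DoraLinearVariant()
        elif use_sine_lora:
            from .variants import SineLoraLinearVariant
            return SineLoraLinearVariant()
        return None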

Author:

I corrected it. Please check my new PR.

Collaborator:

Hmm, can you check again? I don't see any changes in this regard :/

Author:

I did git push again. I only implemented Linear and Embedding for now; is that ok?

Collaborator:

Yep that's totally fine. We can add support for convolutions later once Linear and Embedding work as expected.

else:
return None

from .variants import DoraEmbeddingVariant

return DoraEmbeddingVariant()

def update_layer(
self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora, lora_bias
):
src/peft/tuners/lora/variants.py (19 changes: 19 additions & 0 deletions)
@@ -285,3 +285,22 @@ class DoraConv3dVariant(_DoraConvNdVariant):
def init(module: Conv3d, adapter_name: str) -> None:
dora_layer = DoraConv3dLayer(fan_in_fan_out=False)
_DoraConvNdVariant.init_convd_variant(module, adapter_name, dora_layer=dora_layer)


class SineLoraLinearVariant(LoraVariant):
@staticmethod
def init(module: Linear, adapter_name:str) -> None:
Collaborator:

Suggested change:
-    def init(module: Linear, adapter_name:str) -> None:
+    def init(module: Linear, adapter_name:str, **kwargs) -> None:

With PR #2455 now merged, init() receives all the parameters that update_layer receives.

Author:

Hmmmm, I did not use that; do you think that is ok?

Collaborator:

I don't think so, no.

Currently the tests do not work because of the changes necessary in Linear.__init__ and Embedding.__init__. Once those changes are in place, you'll see that calls to init() complain about unexpected arguments. That's because all the config args are passed to init(), and without the wildcard **kwargs you would have to define them all (which we don't want, of course).

Also, you need a place to set module.sinelora_scaling and module.sinelora_frequency. That place is here, taking the values from the kwargs, e.g.

module.sinelora_frequency = kwargs['sinelora_frequency']

For sinelora_scaling you need to check whether kwargs['sinelora_scaling'] is None and, if so, fall back to a default. In terms of the attributes your forward code uses, something like:

module.freq = kwargs['sinelora_frequency']
module.sine_scaling = kwargs['sinelora_scaling']
if module.sine_scaling is None:
    import math
    module.sine_scaling = math.sqrt(module.in_features)
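Putting the suggested **kwargs signature and these assignments together, init() could look roughly like this (a sketch only, using the sinelora_frequency and sinelora_scaling kwargs discussed above):

    @staticmethod
    def init(module: Linear, adapter_name: str, **kwargs) -> None:
        # The frequency always comes from the config kwargs.
        module.freq = kwargs["sinelora_frequency"]
        # The scaling may be None in the config; fall back to sqrt(in_features) then.
        module.sine_scaling = kwargs["sinelora_scaling"]
        if module.sine_scaling is None:
            import math
            module.sine_scaling = math.sqrt(module.in_features)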

@staticmethod
def forward(module: Linear, active_adapter: str, x: torch.Tensor, result: torch.Tensor) -> torch.Tensor:

lora_A = module.lora_A[active_adapter]
lora_B = module.lora_B[active_adapter]
lora_scaling = module.scaling[active_adapter]
sine_output = x @ torch.sin(module.freq * lora_B.weight.T @ lora_A.weight) / module.sine_scaling * lora_scaling
result = result + sine_output