118118 ("Conv2d 1 LoRA with DoRA" , "Conv2d" , LoraConfig , {"target_modules" : ["conv2d" ], "use_dora" : True }),
119119 ("Conv2d 2 LoRA with DoRA" , "Conv2d" , LoraConfig , {"target_modules" : ["conv2d" , "lin0" ], "use_dora" : True }),
120120 ("Conv2d Groups LoRA" , "Conv2dGroups" , LoraConfig , {"target_modules" : ["conv2d" ]}),
121+ ("Conv2d Groups2 LoRA" , "Conv2dGroups2" , LoraConfig , {"target_modules" : ["conv2d" ]}),
121122 ("Conv2d Groups LoRA with DoRA" , "Conv2dGroups" , LoraConfig , {"target_modules" : ["conv2d" ], "use_dora" : True }),
123+ ("Conv2d Groups2 LoRA with DoRA" , "Conv2dGroups2" , LoraConfig , {"target_modules" : ["conv2d" ], "use_dora" : True }),
122124 ("Conv3d 1 LoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" ]}),
123125 ("Conv3d 2 LoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" , "lin0" ]}),
124126 ("Conv3d 1 LoRA with DoRA" , "Conv3d" , LoraConfig , {"target_modules" : ["conv3d" ], "use_dora" : True }),
@@ -1082,16 +1084,43 @@ def forward(self, X):
 class ModelConv2DGroups(nn.Module):
     def __init__(self):
         super().__init__()
-        self.conv2d = nn.Conv2d(5, 5, 3, groups=5)
+        self.lin0 = nn.Linear(90, 288)
+        # groups is set to 8 since the default LoRA rank is r=8,
+        # so that r is divisible by groups
+        self.conv2d = nn.Conv2d(16, 16, 3, groups=8)
         self.relu = nn.ReLU()
         self.flat = nn.Flatten()
-        self.lin0 = nn.Linear(5, 2)
+        self.lin1 = nn.Linear(16, 2)
         self.sm = nn.LogSoftmax(dim=-1)
         self.dtype = torch.float

     def forward(self, X):
         X = X.to(self.dtype)
-        X = X.reshape(-1, 5, 3, 3)
+        X = X.flatten()
+        X = self.lin0(X)
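+        # lin0 maps the 90 flattened input values to 288 = 2 * 16 * 3 * 3,
+        # which is reshaped into a batch of 2 for the grouped conv below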
+        X = X.reshape(2, 16, 3, 3)
+        X = self.conv2d(X)
+        X = self.relu(X)
+        X = self.flat(X)
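+        # the 3x3 kernel on 3x3 maps leaves 1x1 outputs, so flat yields (2, 16)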
+        X = self.lin1(X)
+        X = self.sm(X)
+        return X
+
+
+class ModelConv2DGroups2(nn.Module):
+    def __init__(self):
+        super().__init__()
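+        # grouped conv whose input and output channel counts differ (16 -> 32, groups=2)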
+        self.conv2d = nn.Conv2d(16, 32, 3, padding=1, groups=2)
+        self.relu = nn.ReLU()
+        self.flat = nn.Flatten()
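+        # padding=1 preserves the 20x20 spatial size, so the flattened conv
+        # output has 32 * 20 * 20 = 12800 features per sample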
+        self.lin0 = nn.Linear(12800, 2)
+        self.sm = nn.LogSoftmax(dim=-1)
+        self.dtype = torch.float
+
+    def forward(self, X):
+        # Note: needs a different input shape, thus ignore original input
+        X = torch.arange(9 * 16 * 20 * 20).view([9, 16, 20, 20]).to(self.conv2d.weight.device)
+        X = X.to(self.dtype)
         X = self.conv2d(X)
         X = self.relu(X)
         X = self.flat(X)
@@ -1170,6 +1199,9 @@ def from_pretrained(cls, model_id, torch_dtype=None):
         if model_id == "Conv2dGroups":
             return ModelConv2DGroups().to(torch_dtype)

+        if model_id == "Conv2dGroups2":
+            return ModelConv2DGroups2().to(torch_dtype)
+
         if model_id == "Conv3d":
             return ModelConv3D().to(torch_dtype)

@@ -1242,7 +1274,7 @@ def test_load_multiple_adapters(self, test_name, model_id, config_cls, config_kw
     @pytest.mark.parametrize("test_name, model_id, config_cls, config_kwargs", TEST_CASES)
     def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
         # https://github.com/huggingface/peft/pull/2403
-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             pytest.skip(
                 f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
             )
@@ -1265,7 +1297,7 @@ def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
     @pytest.mark.parametrize("test_name, model_id, config_cls, config_kwargs", TEST_CASES)
     def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
         # https://github.com/huggingface/peft/pull/2403
-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             pytest.skip(
                 f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
             )
@@ -1280,7 +1312,7 @@ def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs)
     @pytest.mark.parametrize("test_name, model_id, config_cls, config_kwargs", TEST_CASES)
     def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, config_kwargs):
         # https://github.com/huggingface/peft/pull/2403
-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             pytest.skip(
                 f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
             )
@@ -1296,7 +1328,7 @@ def test_merge_layers_is_idempotent(self, test_name, model_id, config_cls, confi
     @pytest.mark.parametrize("test_name, model_id, config_cls, config_kwargs", TEST_CASES)
     def test_safe_merge(self, test_name, model_id, config_cls, config_kwargs):
         # https://github.com/huggingface/peft/pull/2403
-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             pytest.skip(
                 f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
             )
@@ -1390,7 +1422,7 @@ def test_forward_float16(self, test_name, model_id, config_cls, config_kwargs):
         # check that none of this raises an error
         model(**X)

-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             # this model does not support merging
             return

@@ -1432,7 +1464,7 @@ def test_forward_bfloat16(self, test_name, model_id, config_cls, config_kwargs):
         # check that none of this raises an error
         model(**X)

-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             # this model does not support merging
             return

@@ -1473,7 +1505,7 @@ def test_forward_float16_no_autocast(self, test_name, model_id, config_cls, conf
         # check that none of this raises an error
         model(**X)

-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             # this model does not support merging
             return

@@ -1514,7 +1546,7 @@ def test_forward_bfloat16_no_autocast(self, test_name, model_id, config_cls, con
         # check that none of this raises an error
         model(**X)

-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             # this model does not support merging
             return

@@ -1685,7 +1717,7 @@ def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs):
     @pytest.mark.parametrize("test_name, model_id, config_cls, config_kwargs", TEST_CASES)
     def test_disable_adapters_with_merging(self, test_name, model_id, config_cls, config_kwargs):
         # https://github.com/huggingface/peft/pull/2403
-        if model_id in ["Conv2dGroups"]:
+        if model_id in ["Conv2dGroups", "Conv2dGroups2"]:
             pytest.skip(
                 f"Skipping test for {model_id} as merging is not supported. (See https://github.com/huggingface/peft/pull/2403 for details)"
             )
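
Note on the repeated merge skips above: merging folds the low-rank delta into the base weight, but a grouped nn.Conv2d stores its weight as (out_channels, in_channels // groups, kH, kW), so a delta computed over the full input-channel dimension does not line up. A minimal sketch of that shape mismatch (an illustration under simplified assumptions, not PEFT's actual merge code):

import torch
import torch.nn as nn

# same grouped conv as ModelConv2DGroups
conv = nn.Conv2d(16, 16, 3, groups=8)
print(conv.weight.shape)  # torch.Size([16, 2, 3, 3]) -- in_channels // groups == 2

r = 8  # default LoRA rank
lora_A = torch.randn(r, 16 * 3 * 3)           # factor over the full input dim
lora_B = torch.randn(16, r)
delta = (lora_B @ lora_A).view(16, 16, 3, 3)  # (out, in, kH, kW)

# the delta cannot be added elementwise to the grouped weight:
print(delta.shape == conv.weight.shape)  # False: (16, 16, 3, 3) vs (16, 2, 3, 3)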