From 5fae09261f670ca74e618e0303a960005411f8ef Mon Sep 17 00:00:00 2001
From: Ajay Kumar
Date: Sun, 23 Feb 2025 06:37:52 +0000
Subject: [PATCH] Update finetune.py

---
 finetune.py | 22 +---------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

diff --git a/finetune.py b/finetune.py
index b8a869b..1895982 100644
--- a/finetune.py
+++ b/finetune.py
@@ -16,7 +16,6 @@ sys.path.append(os.path.join(os.getcwd(), "peft/src/"))
 from peft import (  # noqa: E402
     LoraConfig,
-    BottleneckConfig,
     PrefixTuningConfig,
     get_peft_model,
     get_peft_model_state_dict,
@@ -48,14 +47,6 @@ def train(
     lora_alpha: int = 16,
     lora_dropout: float = 0.05,
     lora_target_modules: List[str] = None,
-    # bottleneck adapter hyperparams
-    bottleneck_size: int = 256,
-    non_linearity: str = "tanh",
-    adapter_dropout: float = 0.0,
-    use_parallel_adapter: bool = False,
-    use_adapterp: bool = False,
-    target_modules: List[str] = None,
-    scaling: Union[float, str] = 1.0,
     # prefix tuning hyperparams
     num_virtual_tokens: int = 30,
     # llm hyperparams
@@ -203,18 +194,7 @@ def generate_and_tokenize_prompt(data_point):
             bias="none",
             task_type="CAUSAL_LM",
         )
-    elif adapter_name == "bottleneck":
-        config = BottleneckConfig(
-            bottleneck_size=bottleneck_size,
-            non_linearity=non_linearity,
-            adapter_dropout=adapter_dropout,
-            use_parallel_adapter=use_parallel_adapter,
-            use_adapterp=use_adapterp,
-            target_modules=target_modules,
-            scaling=scaling,
-            bias="none",
-            task_type="CAUSAL_LM",
-        )
+
     elif adapter_name == "prefix-tuning":
         config = PrefixTuningConfig(
             num_virtual_tokens=num_virtual_tokens,