@@ -530,38 +530,6 @@ def __init__(self, config: Idefics3Config):
 
         self.post_init()
 
-    # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.enable_input_require_grads
-    def enable_input_require_grads(self):
-        """
-        Enables the gradients for the input embeddings.
-
-        This is useful for lora when using gradient checkpointing.
-        c.f. https://github.com/huggingface/peft/issues/1402#issuecomment-1913675032
-
-        Override to set output.requires_grad = True for both the decoder's and vision model's embeddings.
-        """
-
-        def get_lowest_module(module):
-            if len(list(module.children())) == 0:
-                # If the module has no children, it is a leaf module (e.g., Linear, Conv2d, etc.)
-                return module
-            else:
-                # Recursively call the function on each child module
-                return get_lowest_module(list(module.children())[0])
-
-        def make_inputs_require_grads(module, input, output):
-            output.requires_grad_(True)
-
-        self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
-        self._vision_require_grads_hook = get_lowest_module(self.vision_model).register_forward_hook(
-            make_inputs_require_grads
-        )
-
-    # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.disable_input_require_grads
-    def disable_input_require_grads(self):
-        self._text_require_grads_hook.remove()
-        self._vision_require_grads_hook.remove()
-
     # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2Model.get_input_embeddings
     def get_input_embeddings(self):
         return self.text_model.get_input_embeddings()
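The override removed above hooked both the text input embeddings and the first leaf module of the vision tower (found via `get_lowest_module`) so that their forward outputs require gradients, which is what keeps gradients flowing under LoRA with gradient checkpointing (see the linked PEFT issue). Below is a minimal standalone sketch of that hook pattern, using a plain `nn.Embedding` as a stand-in rather than the actual Idefics3 modules:

```python
import torch
from torch import nn

# Stand-in for a frozen embedding layer (as under LoRA fine-tuning);
# not the actual Idefics3 text or vision embeddings.
embed = nn.Embedding(100, 16)
embed.weight.requires_grad_(False)

def make_inputs_require_grads(module, input, output):
    # Mark the embedding output as requiring grad so gradient checkpointing
    # still has a grad-requiring tensor to rebuild the graph from.
    output.requires_grad_(True)

hook = embed.register_forward_hook(make_inputs_require_grads)

out = embed(torch.tensor([1, 2, 3]))
print(out.requires_grad)  # True, even though the embedding weights are frozen

hook.remove()  # mirrors what disable_input_require_grads() did
```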
@@ -765,26 +733,6 @@ def __init__(self, config):
         # Initialize weights and apply final processing
         self.post_init()
 
-    # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.enable_input_require_grads
-    def enable_input_require_grads(self):
-        """
-        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
-        the model weights fixed.
-        """
-
-        def make_inputs_require_grads(module, input, output):
-            output.requires_grad_(True)
-
-        self._text_require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)
-        self._vision_require_grads_hook = self.model.vision_model.get_input_embeddings().register_forward_hook(
-            make_inputs_require_grads
-        )
-
-    # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.disable_input_require_grads
-    def disable_input_require_grads(self):
-        self._text_require_grads_hook.remove()
-        self._vision_require_grads_hook.remove()
-
     # Copied from transformers.models.idefics2.modeling_idefics2.Idefics2ForConditionalGeneration.get_input_embeddings
     def get_input_embeddings(self):
         return self.model.text_model.get_input_embeddings()
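For context on the call site these helpers served: `PreTrainedModel` exposes a generic `enable_input_require_grads()`, and the usual LoRA plus gradient-checkpointing setup calls it before wrapping the model with PEFT. A hedged sketch of that usage, where the checkpoint id and LoRA settings are placeholders and not taken from this PR:

```python
from transformers import AutoModelForVision2Seq
from peft import LoraConfig, get_peft_model

# Placeholder checkpoint and LoRA settings; not taken from this PR.
model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")

model.gradient_checkpointing_enable()
model.enable_input_require_grads()  # keep grads flowing through frozen input embeddings

lora_config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
model = get_peft_model(model, lora_config)
```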