From e8689838b8785857b545769e660a1d402ba44b66 Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Wed, 10 Dec 2025 16:47:36 +0100
Subject: [PATCH 1/7] Add auto import

---
 docs/source/conf.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 3c14ef2..78b3137 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -25,6 +25,11 @@
 html_theme = 'shibuya'
 html_static_path = ['_static']
 
+autodoc_mock_imports = [
+    "torch",
+    "torchvision",
+    "verl",
+]
 
 import os
 import sys

From 87ca8e74eb31be7db4701274bfeee8da3e807ded Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Wed, 10 Dec 2025 16:59:42 +0100
Subject: [PATCH 2/7] Add torch and torchvision to docs import

---
 docs/source/conf.py | 2 --
 pyproject.toml      | 2 ++
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 78b3137..9f160c2 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -26,8 +26,6 @@
 html_static_path = ['_static']
 
 autodoc_mock_imports = [
-    "torch",
-    "torchvision",
     "verl",
 ]
 
diff --git a/pyproject.toml b/pyproject.toml
index 1a53c98..8637d91 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -43,6 +43,8 @@ docs = [
     "sphinx",
     "sphinx-tabs",
     "shibuya",
+    "torch",
+    "torchvision",
 ]
 
 [project.scripts]

From 6f8dee965ca18b79767cf98b092855bdb1f19154 Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Tue, 16 Dec 2025 20:59:37 +0100
Subject: [PATCH 3/7] Add sphinx click

---
 docs/source/conf.py | 2 +-
 pyproject.toml      | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 9f160c2..021e3d4 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,7 +14,7 @@
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
 
-extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_tabs.tabs"]
+extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon", "sphinx_tabs.tabs", "sphinx_click"]
 templates_path = ['_templates']
 exclude_patterns = []
 
diff --git a/pyproject.toml b/pyproject.toml
index 8637d91..b6a34ab 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,9 +40,10 @@ flash-attn = [
     "flash-attn",
 ]
 docs = [
+    "shibuya",
     "sphinx",
     "sphinx-tabs",
-    "shibuya",
+    "sphinx-click",
     "torch",
     "torchvision",
 ]

From c6c6ddf30a7eeea7592e87cbf01b772935249e3e Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Tue, 16 Dec 2025 21:40:54 +0100
Subject: [PATCH 4/7] Add sphinx<9 requirement

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index b6a34ab..7c34a0a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -41,7 +41,7 @@ flash-attn = [
 ]
 docs = [
     "shibuya",
-    "sphinx",
+    "sphinx<9",
     "sphinx-tabs",
     "sphinx-click",
     "torch",
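Patches 1-4 converge on a split strategy for the docs build: torch and torchvision are installed for real through the `docs` extra (with sphinx pinned below 9), while `verl` stays in `autodoc_mock_imports`, which substitutes a mock object at import time so autodoc can import the documented modules without that dependency being present. A minimal conf.py sketch of the combined result; the surrounding values are illustrative, not the project's exact file:

    # docs/source/conf.py -- illustrative sketch of the post-patch-4 state
    import os
    import sys

    # Make the package importable without installing it.
    sys.path.insert(0, os.path.abspath('../../src'))

    extensions = [
        "sphinx.ext.autodoc",   # pull API pages from docstrings
        "sphinx.ext.napoleon",  # parse Google/NumPy-style docstrings
        "sphinx_tabs.tabs",
        "sphinx_click",         # render click-based CLIs (.. click:: directive)
    ]

    # Modules listed here are replaced by mocks at import time, so autodoc
    # can import code that depends on them without installing them.
    autodoc_mock_imports = [
        "verl",
    ]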
From 1e92b9e501fb817f1298b295c375accb1ecb5fb8 Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Tue, 16 Dec 2025 21:48:34 +0100
Subject: [PATCH 5/7] Maybe?

---
 docs/source/guides/add_modality.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/guides/add_modality.rst b/docs/source/guides/add_modality.rst
index e4a6f48..b9d1925 100644
--- a/docs/source/guides/add_modality.rst
+++ b/docs/source/guides/add_modality.rst
@@ -170,7 +170,7 @@ Lastly, we implement the modality model. This is the model that performs the for
 A modality class must inherit :class:`~multimeditron.model.modalities.base.BaseModality` is typically created with 2 main modules:
 
 1. A pretrained modality embedder (like a CLIP model): This module produces meaningful embeddings for given modalities
-2. A tunable projection module (usually a simple MLP or a linear layer): This module map embeddings from the modality embedder to the LLM embedding space. The dimension of this embedding space is given by the `hidden_size` attribute of :func:`~multimeditron.model.modalities.base.BaseModalityConfig`
+2. A tunable projection module (usually a simple MLP or a linear layer): This module maps embeddings from the modality embedder to the LLM embedding space. The dimension of this embedding space is given by the `hidden_size` attribute of :class:`~multimeditron.model.modalities.base.BaseModalityConfig`
 
 .. code-block:: python

From 9bf0de56224cc97cc8933a8d63c327381f7f7d85 Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Tue, 16 Dec 2025 21:55:27 +0100
Subject: [PATCH 6/7] Exclude __all__ directive

---
 docs/source/conf.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 021e3d4..869e29e 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -29,6 +29,11 @@
     "verl",
 ]
+autodoc_default_options = {
+    "exclude-members": "__all__",
+}
+
+
 
 import os
 import sys
 sys.path.insert(0, os.path.abspath('../../src'))
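The guide text touched by patch 5 pins down the two-module anatomy of a modality: a pretrained embedder that produces meaningful modality embeddings, and a tunable projector that maps them into the LLM embedding space of size `hidden_size`. A self-contained sketch of that pattern in plain torch.nn; the class and argument names here are illustrative, not the repo's actual BaseModality API:

    import torch
    import torch.nn as nn

    class ImageModalitySketch(nn.Module):
        """Pretrained embedder + tunable projector, per the guide's description."""

        def __init__(self, embed_dim: int = 768, hidden_size: int = 4096):
            super().__init__()
            # 1. Pretrained modality embedder (e.g. a CLIP vision tower);
            #    nn.Identity() stands in for the real, frozen model.
            self.embedder = nn.Identity()
            # 2. Tunable projection module (here a small MLP) mapping embedder
            #    outputs into the LLM embedding space of size `hidden_size`.
            self.projector = nn.Sequential(
                nn.Linear(embed_dim, hidden_size),
                nn.GELU(),
                nn.Linear(hidden_size, hidden_size),
            )

        def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
            with torch.no_grad():  # keep the pretrained embedder frozen
                features = self.embedder(pixel_values)
            return self.projector(features)  # (..., hidden_size)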
From 76235ab7c1ed962eb379aad05c95ebc7cfc07095 Mon Sep 17 00:00:00 2001
From: MichelDucartier
Date: Sat, 27 Dec 2025 18:21:38 +0100
Subject: [PATCH 7/7] Remove ambiguous import

---
 docs/source/conf.py                                      | 6 +-----
 src/multimeditron/model/modalities/base.py               | 2 ++
 src/multimeditron/model/modalities/image_modality.py     | 2 +-
 src/multimeditron/model/modalities/image_modality_moe.py | 2 +-
 .../model/modalities/image_modality_moe_pep.py           | 3 +--
 5 files changed, 6 insertions(+), 9 deletions(-)

diff --git a/docs/source/conf.py b/docs/source/conf.py
index 869e29e..5e32746 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -27,13 +27,9 @@
 
 autodoc_mock_imports = [
     "verl",
+    "deepspeed"
 ]
-autodoc_default_options = {
-    "exclude-members": "__all__",
-}
-
-
 
 import os
 import sys
 sys.path.insert(0, os.path.abspath('../../src'))
diff --git a/src/multimeditron/model/modalities/base.py b/src/multimeditron/model/modalities/base.py
index 04301a7..499a794 100644
--- a/src/multimeditron/model/modalities/base.py
+++ b/src/multimeditron/model/modalities/base.py
@@ -5,6 +5,8 @@
 import torch
 from transformers import AutoModel, AutoConfig, AutoProcessor, PretrainedConfig, PreTrainedModel
 
+__all__ = []
+
 class BaseModalityConfig(PretrainedConfig):
     """
     Configuration class for defining modality parameters.
diff --git a/src/multimeditron/model/modalities/image_modality.py b/src/multimeditron/model/modalities/image_modality.py
index 371bfc6..806a9c4 100644
--- a/src/multimeditron/model/modalities/image_modality.py
+++ b/src/multimeditron/model/modalities/image_modality.py
@@ -1,5 +1,5 @@
 from multimeditron.model.constants import NUM_EMBEDDINGS_KEY, MODALITY_VALUE_KEY, POSITION_IDS_KEY
-from multimeditron.model.modalities.base import BaseModality, BaseModalityConfig, AutoModality, BaseModalityProcessor
+from multimeditron.model.modalities import BaseModality, BaseModalityConfig, AutoModality, BaseModalityProcessor
 from multimeditron.model.projectors.mlp import MLPProjector
 import torch
 from transformers import AutoImageProcessor, AutoModel, AutoConfig
diff --git a/src/multimeditron/model/modalities/image_modality_moe.py b/src/multimeditron/model/modalities/image_modality_moe.py
index ed7eb97..183d495 100644
--- a/src/multimeditron/model/modalities/image_modality_moe.py
+++ b/src/multimeditron/model/modalities/image_modality_moe.py
@@ -1,6 +1,6 @@
 import torch
 from multimeditron.model.constants import NUM_EMBEDDINGS_KEY, MODALITY_VALUE_KEY
-from multimeditron.model.modalities.base import AutoModality, BaseModality, BaseModalityConfig, BaseModalityProcessor
+from multimeditron.model.modalities import AutoModality, BaseModality, BaseModalityConfig, BaseModalityProcessor
 from multimeditron.model.modalities.moe.gating import GatingNetwork
 from multimeditron.model.projectors.mlp import MLPProjector
 from multimeditron.model.attention import CrossAttention
diff --git a/src/multimeditron/model/modalities/image_modality_moe_pep.py b/src/multimeditron/model/modalities/image_modality_moe_pep.py
index ddc958c..22740d5 100644
--- a/src/multimeditron/model/modalities/image_modality_moe_pep.py
+++ b/src/multimeditron/model/modalities/image_modality_moe_pep.py
@@ -1,6 +1,5 @@
-import uuid
 from multimeditron.model.constants import NUM_EMBEDDINGS_KEY, MODALITY_VALUE_KEY
-from multimeditron.model.modalities.base import AutoModality, BaseModality, BaseModalityConfig, BaseModalityProcessor
+from multimeditron.model.modalities import AutoModality, BaseModality, BaseModalityConfig, BaseModalityProcessor
 from multimeditron.model.modalities.moe.gating import GatingNetwork
 from multimeditron.model.projectors.mlp import MLPProjector
 from multimeditron.model.attention import CrossAttention
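The empty `__all__` that patch 7 adds to base.py is what keeps the new package-level imports unambiguous: with `__all__ = []`, neither `from ... import *` nor autodoc's automodule (which honors `__all__` by default) picks up the names base.py merely imports, such as torch or the transformers classes, so they are documented only where they are defined. A small illustration of the mechanism with a hypothetical module:

    # shapes.py -- hypothetical module mirroring base.py's pattern
    from dataclasses import dataclass  # an imported name, not a re-export

    __all__ = []  # export nothing via star-imports or autodoc's automodule

    @dataclass
    class Point:
        """Still importable explicitly, just never re-exported implicitly."""
        x: float
        y: float

    # client code:
    #   from shapes import *      -> binds nothing, because __all__ is empty
    #   from shapes import Point  -> works; explicit imports are unaffected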