From 19cb6ccc6b0682bc7dbb6a138c38b442a1a6b2a0 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Thu, 1 Jan 2026 01:26:30 -0500
Subject: [PATCH 01/16] Update code: add .env, revise docs, prune generated mir.json
---
.env | 11 +
CODE_OF_CONDUCT.md | 3 +
MIR.egg-info/PKG-INFO | 34 +-
MIR.egg-info/SOURCES.txt | 22 +-
mir/mir.json | 13254 +------------------------------------
5 files changed, 35 insertions(+), 13289 deletions(-)
create mode 100644 .env
diff --git a/.env b/.env
new file mode 100644
index 0000000..74c2435
--- /dev/null
+++ b/.env
@@ -0,0 +1,11 @@
+# Should Not Change
+LOGO_BASE_URL="https://raw.githubusercontent.com/darkshapes/entity-statement/refs/heads/main/png/"
+DOC_PATH="darkshapes.github.io/public/docs"
+
+# May Change
+VENV=".venv"
+DOC_REPO_CLONE="${HOME}/Documents/GitHub/darkshapes/"
+SKIP_DOCS=0
+
+# Change Every Time
+LOGO_PATH="mir/mir75_dark.png"
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 0242a25..5d216f1 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -25,6 +25,9 @@ Version = 0.0.5_2025-22-12
| Creepy Vibes... | Unacceptable. Words and flirts CAN hurt. End coercion. |
| Users vs Developers | Everyone involved, anywhere. Skill DIVERSITY, not division. |
+\*More behavior guidelines
+https://www.recurse.com/social-rules
+
## Constructive Criticism Guide:
- Ask consent first. Don't forget to wait for the answer!
diff --git a/MIR.egg-info/PKG-INFO b/MIR.egg-info/PKG-INFO
index 970ed4f..678b0bd 100644
--- a/MIR.egg-info/PKG-INFO
+++ b/MIR.egg-info/PKG-INFO
@@ -70,10 +70,10 @@ This repo is an example development implementation of autogenerated model infere
>
> ## Example:
>
-> ## mir : model . transformer . clip-l : stable-diffusion-xl
+> ## mir : // model . vit . clip-l : stable-diffusion-xl
>
> ```
-> mir : model . lora . hyper : flux-1
+> mir : // model . lora . hyper : flux-1
> ↑ ↑ ↑ ↑ ↑
> [URI]:[Domain].[Architecture].[Series]:[Compatibility]
> ```
@@ -132,27 +132,28 @@ Meant to be created by standards community, derived from code and file analysis
| Abbreviation | Description |
| ------------------------------------- | ----------------------------------------- |
+|
AET
| Autoencoding Transformer |
+| ART
| Autoregressive Transformer |
+| BRNN
| Bi-directional Recurrent Neural Network |
+| CNN
| Convolutional Neural Network |
+| CONTROLNET
| Controlnet |
+| DETR
| Detection Transformer |
+| GAN
| Generative Adversarial Model |
| GRU
| Gated recurrent unit |
-| RBM
| Restricted Boltzmann machine |
-| TAE
| Tiny Autoencoder |
-| VAE
| Variable Autoencoder |
+| LORA
| Low-Rank Adaptation |
| LSTM
| Long Short-Term Memory |
-| RESNET
| Residual Network |
-| CNN
| Convolutional Neural Network |
+| MOE
| Mixture of Experts |
+| RBM
| Restricted Boltzmann machine |
| RCNN
| Region-based Convolutional Neural Network |
+| RESNET
| Residual Network |
| RNN
| Recurrent Neural Network |
-| BRNN
| Bi-directional Recurrent Neural Network |
-| GAN
| Generative Adversarial Model |
| SSM
| State-Space Model |
-| DETR
| Detection Transformer |
-| VIT
| Vision Transformer |
-| MOE
| Mixture of Experts |
-| AET
| Autoencoding Transformer |
| STST
| Sequence-to-Sequence Transformer |
-| ART
| Autoregressive Transformer |
-| LORA
| Low-Rank Adaptation |
-| CONTROLNET
| Controlnet |
+| TAE
| Tiny Autoencoder |
| UNCLASSIFIED
| Unknown |
+| VAE
| Variable Autoencoder |
+| VLA
| Vision Language Action |
+| VIT
| Vision Transformer |
--
@@ -196,6 +197,7 @@ MIR is inspired by:
+[](https://github.com/darkshapes/MIR/actions/workflows/mir.yml)


[
](https://discord.gg/VVn9Ku74Dk)
diff --git a/MIR.egg-info/SOURCES.txt b/MIR.egg-info/SOURCES.txt
index 101d0c4..23176a9 100644
--- a/MIR.egg-info/SOURCES.txt
+++ b/MIR.egg-info/SOURCES.txt
@@ -1,4 +1,5 @@
.gitignore
+CODE_OF_CONDUCT.md
LICENSE
README.md
pyproject.toml
@@ -10,25 +11,6 @@ MIR.egg-info/dependency_links.txt
MIR.egg-info/entry_points.txt
MIR.egg-info/requires.txt
MIR.egg-info/top_level.txt
-docs/index.html
-docs/mir.html
-docs/search.js
-docs/mir/automata.html
-docs/mir/config.html
-docs/mir/doc_parser.html
-docs/mir/indexers.html
-docs/mir/inspect.html
-docs/mir/maid.html
-docs/mir/run.html
-docs/mir/tag.html
-docs/mir/config/constants.html
-docs/mir/config/conversion.html
-docs/mir/config/json_io.html
-docs/mir/inspect/classes.html
-docs/mir/inspect/metadata.html
-docs/mir/inspect/parenting.html
-docs/mir/inspect/pipes.html
-docs/mir/inspect/tasks.html
mir/__init__.py
mir/__main__.py
mir/automata.py
@@ -38,10 +20,10 @@ mir/maid.py
mir/mir.json
mir/tag.py
mir/config/__init__.py
+mir/config/console.py
mir/config/constants.py
mir/config/conversion.py
mir/config/json_io.py
-mir/config/logging.py
mir/inspect/__init__.py
mir/inspect/classes.py
mir/inspect/metadata.py
diff --git a/mir/mir.json b/mir/mir.json
index 2cba92d..59ae13b 100644
--- a/mir/mir.json
+++ b/mir/mir.json
@@ -1,13255 +1,3 @@
{
- "info.dit.allegro": {
- "*": {
- "repo": "rhymes-ai/Allegro",
- "pkg": {
- "0": {
- "diffusers": "AllegroPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "guidance_scale": 7.5,
- "max_sequence_length": 512,
- "num_inference_steps": 100
- }
- }
- },
- "file_256": [
- "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
- ],
- "layer_b3": [
- "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
- ],
- "layer_256": [
- "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "allegro"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "allegro"
- ],
- "transformer": [
- "AllegroTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.amused-512": {
- "*": {
- "repo": "amused/amused-512",
- "pkg": {
- "0": {
- "diffusers": "AmusedInpaintPipeline"
- }
- },
- "pipe_names": {
- "vqvae": [
- "VQModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "amused-512"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "transformer": [
- "UVit2DModel"
- ],
- "scheduler": [
- "ops.scheduler.amused",
- "scheduler"
- ]
- }
- }
- },
- "info.lora.animatediff-motion-adapter-v1-5-2": {
- "*": {
- "repo": "guoyww/animatediff-motion-adapter-v1-5-2",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffVideoToVideoPipeline"
- }
- }
- }
- },
- "info.lora.animatediff-motion-adapter-sdxl": {
- "*": {
- "repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffSDXLPipeline"
- }
- }
- }
- },
- "info.controlnet.animatediff-sparsectrl-scribble": {
- "*": {
- "repo": "guoyww/animatediff-sparsectrl-scribble",
- "pkg": {
- "0": {
- "diffusers": "SparseControlNetModel"
- }
- }
- }
- },
- "info.controlnet.animatelcm": {
- "*": {
- "repo": "wangfuyun/AnimateLCM",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.unet.audioldm-s-v2": {
- "*": {
- "repo": "cvssp/audioldm-s-full-v2",
- "pkg": {
- "0": {
- "diffusers": "AudioLDMPipeline"
- }
- },
- "file_256": [
- "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ClapTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "SpeechT5HifiGan"
- ]
- }
- }
- },
- "info.unet.audioldm2": {
- "*": {
- "repo": "cvssp/audioldm2",
- "pkg": {
- "0": {
- "diffusers": "AudioLDM2Pipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
- ],
- "layer_b3": [
- "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
- ],
- "layer_256": [
- "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ClapModel"
- ],
- "text_encoder_2": [
- "T5EncoderModel",
- "VitsModel"
- ],
- "projection_model": [
- "AudioLDM2ProjectionModel"
- ],
- "language_model": [
- "GPT2LMHeadModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "audioldm2"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "audioldm2"
- ],
- "feature_extractor": [
- "ClapFeatureExtractor"
- ],
- "unet": [
- "AudioLDM2UNet2DConditionModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "SpeechT5HifiGan"
- ]
- }
- }
- },
- "info.dit.auraflow": {
- "*": {
- "repo": "fal/AuraFlow",
- "pkg": {
- "0": {
- "diffusers": "AuraFlowPipeline"
- }
- },
- "identifiers": [
- [
- 8192,
- 3072
- ],
- "mlpX.c_fc2.weight",
- "joint_transformer_blocks.2.ff_context.linear_2.weight"
- ],
- "file_256": [
- "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
- "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
- "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
- "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
- ],
- "layer_b3": [
- "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
- "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
- "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
- "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
- ],
- "layer_256": [
- "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
- "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
- "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
- "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "auraflow"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "AuraFlowTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- },
- "tasks": [
- "AuraFlowPipeline"
- ]
- }
- },
- "info.unet.blipdiffusion": {
- "*": {
- "repo": "Salesforce/blipdiffusion",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "blipdiffusion"
- ],
- "text_encoder": [
- "ContextCLIPTextModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- "ops.scheduler.pndm",
- "scheduler"
- ],
- "qformer": [
- "Blip2QFormerModel"
- ],
- "image_processor": [
- "BlipImageProcessor"
- ]
- }
- }
- },
- "info.dit.chroma": {
- "*": {
- "repo": "lodestones/Chroma",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline"
- },
- "1": {
- "generation": {
- "neg_text": "",
- "num_steps": "28",
- "latent_size": [
- 64,
- 64
- ]
- }
- }
- },
- "file_256": [
- "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
- "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
- "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
- "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
- ],
- "layer_b3": [
- "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
- ],
- "layer_256": [
- "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "ChromaPipeline"
- ]
- },
- "chroma1-hd": {
- "repo": "lodestones/Chroma1-HD",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 40
- }
- }
- },
- "file_256": [
- "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546",
- "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7",
- "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc"
- ]
- },
- "chroma1-flash": {
- "repo": "lodestones/Chroma1-Flash",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.0,
- "num_images_per_prompt": 1
- }
- }
- },
- "file_256": [
- "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
- "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
- "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "ChromaPipeline"
- ]
- }
- },
- "info.dit.cogvideox": {
- "*": {
- "repo": "zai-org/CogVideoX-2b",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_videos_per_prompt": 1,
- "num_inference_steps": 50,
- "num_frames": 49,
- "guidance_scale": 6
- }
- }
- },
- "file_256": [
- "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
- ],
- "layer_b3": [
- "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
- ],
- "layer_256": [
- "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogvideox"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "transformer": [
- "CogVideoXTransformer3DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- }
- }
- },
- "info.controlnet.cogvideox-fun-v-pose": {
- "*": {
- "repo": "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXFunControlPipeline"
- }
- }
- }
- },
- "info.dit.cogvideox-i2v": {
- "*": {
- "repo": "zai-org/CogVideoX-5b-I2V",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogvideox-i2v"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.cogvideox",
- "cogvideox-i2v"
- ],
- "transformer": [
- "CogVideoXTransformer3DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- }
- }
- },
- "info.dit.cogview3": {
- "*": {
- "repo": "zai-org/CogView3-Plus-3B",
- "pkg": {
- "0": {
- "diffusers": "CogView3PlusPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "guidance_scale": 7.0,
- "num_images_per_prompt": 1,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogview3"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "CogView3PlusTransformer2DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- },
- "tasks": [
- "CogView3PlusPipeline"
- ]
- }
- },
- "info.dit.cogview4": {
- "*": {
- "repo": "zai-org/CogView4-6B",
- "pkg": {
- "0": {
- "diffusers": "CogView4Pipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogview4"
- ],
- "text_encoder": [
- "GlmModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "CogView4Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- },
- "tasks": [
- "CogView4ControlPipeline",
- "CogView4Pipeline"
- ]
- }
- },
- "info.controlnet.cogview4-control": {
- "*": {
- "repo": "zai-org/CogView4-6B-Control",
- "pkg": {
- "0": {
- "diffusers": "CogView4ControlPipeline"
- }
- }
- }
- },
- "info.dit.consisid": {
- "*": {
- "repo": "BestWishYsh/ConsisID-preview",
- "pkg": {
- "0": {
- "diffusers": "ConsisIDPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "consisid"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "transformer": [
- "ConsisIDTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- }
- }
- },
- "info.unet.diffusers-cd-imagenet64-l2": {
- "*": {
- "repo": "openai/diffusers-cd_imagenet64_l2",
- "pkg": {
- "0": {
- "diffusers": "ConsistencyModelPipeline"
- }
- }
- }
- },
- "info.controlnet.sd-controlnet-canny": {
- "*": {
- "repo": "lllyasviel/sd-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.blipdiffusion-controlnet": {
- "*": {
- "repo": "Salesforce/blipdiffusion-controlnet",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionControlNetPipeline"
- }
- }
- }
- },
- "info.controlnet.control-v11p-sd15-inpaint": {
- "*": {
- "repo": "lllyasviel/control_v11p_sd15_inpaint",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-canny-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-canny-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-depth-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-depth-sdxl-1.0-small",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-union-sdxl-1": {
- "*": {
- "repo": "xinsir/controlnet-union-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetUnionModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-canny": {
- "*": {
- "repo": "InstantX/SD3-Controlnet-Canny",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-inpainting": {
- "*": {
- "repo": "alimama-creative/SD3-Controlnet-Inpainting",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sd2-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SD2.1-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sdxl-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SDXL-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.dit.cosmos-predict2-text2image": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Text2Image",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2TextToImagePipeline"
- }
- },
- "file_256": [
- "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
- "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
- "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
- "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
- "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
- "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
- "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
- "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
- "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
- "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
- "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
- "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
- "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
- ],
- "layer_b3": [
- "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
- "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
- "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
- "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
- "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
- "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
- "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
- "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
- "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
- "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
- "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
- "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
- "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
- ],
- "layer_256": [
- "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
- "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
- "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
- "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
- "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
- "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
- "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
- "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
- "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
- "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
- "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
- "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
- "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
- ],
- "pipe_names": {
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-text2image"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-predict2-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2VideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-text2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosTextToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-text2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosVideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.unet.if-i-xl-v1": {
- "*": {
- "repo": "DeepFloyd/IF-I-XL-v1.0",
- "pkg": {
- "0": {
- "diffusers": "IFPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "if-i-xl-v1"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ]
- },
- "tasks": [
- "IFImg2ImgPipeline",
- "IFInpaintingPipeline",
- "IFPipeline"
- ]
- }
- },
- "info.dit.easyanimatev5-zh": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimatePipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- "BertModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.controlnet.easyanimatev5-zh-control": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateControlPipeline"
- }
- }
- }
- },
- "info.dit.easyanimatev5-zh-inp": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateInpaintPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- "BertModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh-inp"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.flux1-schnell": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-schnell",
- "pkg": {
- "0": {
- "diffusers": "FluxInpaintPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 0.0,
- "num_inference_steps": 4,
- "max_sequence_length": 256
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "num_inference_steps": 4
- }
- }
- },
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "file_256": [
- "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
- "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
- ],
- "layer_b3": [
- "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
- "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
- "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
- ],
- "layer_256": [
- "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
- "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
- "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-schnell"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-schnell"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline",
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- },
- "shuttle-3-aesthetic": {
- "repo": "shuttleai/shuttle-3.1-aesthetic",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
- "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
- ],
- "layer_256": [
- "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
- "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a"
- ],
- "layer_b3": [
- "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
- "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c"
- ]
- },
- "shuttle-3-diffusion": {
- "repo": "shuttleai/shuttle-3-diffusion",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd",
- "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755",
- "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be",
- "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8"
- ],
- "layer_256": [
- "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
- "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
- "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c"
- ],
- "layer_b3": [
- "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
- "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
- "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd"
- ]
- },
- "shuttle-jaguar": {
- "repo": "shuttleai/shuttle-jaguar",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
- "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f"
- ],
- "layer_b3": [
- "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
- "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672"
- ]
- }
- },
- "info.dit.flux1-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 3.5,
- "num_inference_steps": 50,
- "max_sequence_length": 512
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "gudance": 3.5,
- "num_inference_steps": 25
- }
- }
- },
- "file_256": [
- "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
- "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
- "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
- "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
- "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
- ],
- "layer_b3": [
- "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
- "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
- "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
- ],
- "layer_256": [
- "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
- "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
- "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
- ],
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-dev"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline",
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- },
- "mystic": {
- "repo": "enhanceaiteam/Mystic",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 16,
- "guidance_scale": 7.5,
- "width": 768,
- "height": 1024
- }
- }
- },
- "file_256": [
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
- ],
- "layer_256": [
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
- ],
- "layer_b3": [
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
- ]
- },
- "flux1-lite": {
- "repo": "freepik/flux.1-lite-8b",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 28
- }
- }
- },
- "file_256": [
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
- ],
- "layer_256": [
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
- ],
- "layer_b3": [
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
- ]
- },
- "f-lite": {
- "repo": "freepik/f-lite",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "f-lite-texture": {
- "repo": "freepik/f-lite-texture",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "flux": {
- "repo": "TencentARC/flux-mini",
- "file_256": [
- "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
- ],
- "layer_256": [
- "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
- ],
- "layer_b3": [
- "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
- ]
- },
- "flex2": {
- "repo": "ostris/Flex.2-preview",
- "file_256": [
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
- ],
- "layer_256": [
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
- ],
- "layer_b3": [
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
- ]
- },
- "flex1-alpha": {
- "repo": "ostris/Flex.1-alpha",
- "file_256": [
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
- ],
- "layer_256": [
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
- ],
- "layer_b3": [
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
- ]
- }
- },
- "info.controlnet.flux1-canny-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Canny-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxControlPipeline"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny-alpha": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-Controlnet-Canny-alpha",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.dit.flux1-fill-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Fill-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxFillPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ]
- }
- },
- "info.dit.flux1-kontext-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Kontext-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxKontextInpaintPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ]
- }
- },
- "info.dit.hidream-i1": {
- "*": {
- "repo": "HiDream-ai/HiDream-I1-Full",
- "pkg": {
- "0": {
- "diffusers": "HiDreamImagePipeline"
- }
- },
- "file_256": [
- "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
- ],
- "layer_b3": [
- "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
- ],
- "layer_256": [
- "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_2": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_3": [
- "T5EncoderModel"
- ],
- "tokenizer_3": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_4": [
- "LlamaForCausalLM"
- ],
- "tokenizer_4": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "transformer": [
- "HiDreamImageTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoPipeline"
- }
- },
- "file_256": [
- "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
- ],
- "layer_b3": [
- "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
- ],
- "layer_256": [
- "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
- ],
- "pipe_names": {
- "text_encoder": [
- "LlamaModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "CLIPTextModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-i2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-I2V",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "LlavaForConditionalGeneration"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "CLIPTextModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "image_processor": [
- "CLIPImageProcessor"
- ]
- }
- }
- },
- "info.dit.hunyuandit-v1": {
- "diffusers": {
- "repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
- "pkg": {
- "0": {
- "diffusers": "HunyuanDiTPipeline",
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "extra_embedder",
- "model.blocks",
- "skip_norm.weight"
- ],
- "file_256": [
- "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
- "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
- "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
- ],
- "layer_b3": [
- "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
- "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
- "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
- ],
- "layer_256": [
- "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
- "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
- "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "BertModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuandit-v1"
- ],
- "transformer": [
- "HunyuanDiT2DModel"
- ],
- "scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuandit-v1"
- ]
- },
- "tasks": [
- "HunyuanDiTPipeline"
- ]
- }
- },
- "info.unet.i2vgen-xl": {
- "*": {
- "repo": "ali-vilab/i2vgen-xl",
- "pkg": {
- "0": {
- "diffusers": "I2VGenXLPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "i2vgen-xl"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ],
- "unet": [
- "I2VGenXLUNet"
- ],
- "scheduler": [
- "ops.scheduler.ddim",
- "scheduler"
- ]
- }
- }
- },
- "info.unet.kandinsky-2-1": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-1-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyPriorPipeline"
- }
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-1"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "Kandinsky3Img2ImgPipeline",
- "Kandinsky3Pipeline",
- "KandinskyCombinedPipeline",
- "KandinskyImg2ImgCombinedPipeline",
- "KandinskyImg2ImgPipeline",
- "KandinskyInpaintCombinedPipeline",
- "KandinskyInpaintPipeline",
- "KandinskyPipeline",
- "KandinskyV22CombinedPipeline",
- "KandinskyV22Img2ImgCombinedPipeline",
- "KandinskyV22Img2ImgPipeline",
- "KandinskyV22InpaintCombinedPipeline",
- "KandinskyV22InpaintPipeline",
- "KandinskyV22Pipeline"
- ]
- }
- },
- "info.unet.kandinsky-2-2": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-2-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyPriorPipeline"
- }
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-2"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "Kandinsky3Img2ImgPipeline",
- "Kandinsky3Pipeline",
- "KandinskyCombinedPipeline",
- "KandinskyImg2ImgCombinedPipeline",
- "KandinskyImg2ImgPipeline",
- "KandinskyInpaintCombinedPipeline",
- "KandinskyInpaintPipeline",
- "KandinskyPipeline",
- "KandinskyV22CombinedPipeline",
- "KandinskyV22Img2ImgCombinedPipeline",
- "KandinskyV22Img2ImgPipeline",
- "KandinskyV22InpaintCombinedPipeline",
- "KandinskyV22InpaintPipeline",
- "KandinskyV22Pipeline"
- ]
- }
- },
- "info.unet.kolors": {
- "diffusers": {
- "repo": "Kwai-Kolors/Kolors-diffusers",
- "pkg": {
- "0": {
- "diffusers": "KolorsPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 5.0,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
- ],
- "layer_b3": [
- "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
- ],
- "layer_256": [
- "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
- ],
- "identifiers": [
- ".DenseReluDense.wi.weight",
- "encoder_hid_proj.weight"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ChatGLMModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kolors"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "KolorsImg2ImgPipeline",
- "KolorsPAGPipeline",
- "KolorsPipeline"
- ]
- }
- },
- "info.dit.latte-1": {
- "*": {
- "repo": "maxin-cn/Latte-1",
- "pkg": {
- "0": {
- "diffusers": "LattePipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "latte-1"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "LatteTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.ltx-video": {
- "*": {
- "repo": "Lightricks/LTX-Video",
- "pkg": {
- "0": {
- "diffusers": "LTXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.ltx-video-09": {
- "*": {
- "repo": "Lightricks/LTX-Video-0.9.5",
- "pkg": {
- "0": {
- "diffusers": "LTXConditionPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video-09"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.lumina-next-sft": {
- "diffusers": {
- "repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
- "pkg": {
- "0": {
- "diffusers": "LuminaPipeline",
- "precision": " ops.precision.bfloat.B16"
- }
- },
- "identifiers": [
- "time_caption",
- "feed_forward"
- ],
- "file_256": [
- "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
- ],
- "layer_b3": [
- "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
- "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
- ],
- "layer_256": [
- "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
- "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
- ],
- "pipe_names": {
- "transformer": [
- "LuminaNextDiT2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "GemmaPreTrainedModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "lumina-next-sft"
- ]
- },
- "tasks": [
- "Lumina2Pipeline",
- "LuminaPipeline"
- ]
- }
- },
- "info.dit.lumina-image-2": {
- "*": {
- "repo": "Alpha-VLLM/Lumina-Image-2.0",
- "pkg": {
- "0": {
- "diffusers": "Lumina2Pipeline"
- }
- },
- "file_256": [
- "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
- "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
- ],
- "layer_b3": [
- "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
- ],
- "layer_256": [
- "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
- ],
- "pipe_names": {
- "transformer": [
- "Lumina2Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "Gemma2PreTrainedModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "lumina-image-2"
- ]
- },
- "tasks": [
- "Lumina2Pipeline"
- ]
- },
- "illustrious-lumina-v3": {
- "repo": "OnomaAIResearch/Illustrious-Lumina-v0.03",
- "file_256": [
- "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8",
- "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214"
- ],
- "layer_256": [
- "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
- "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828"
- ],
- "layer_b3": [
- "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
- "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c"
- ]
- }
- },
- "info.dit.mochi-1": {
- "*": {
- "repo": "genmo/mochi-1-preview",
- "pkg": {
- "0": {
- "diffusers": "MochiPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "mochi-1"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "mochi-1"
- ],
- "transformer": [
- "MochiTransformer3DModel"
- ]
- }
- }
- },
- "info.unet.musicldm": {
- "*": {
- "repo": "ucsd-reach/musicldm",
- "pkg": {
- "0": {
- "diffusers": "MusicLDMPipeline",
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ClapTextModelWithProjection",
- "ClapModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "musicldm"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "SpeechT5HifiGan"
- ]
- }
- }
- },
- "info.dit.pixart-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtAlphaPipeline"
- }
- },
- "identifiers": [
- "aspect_ratio",
- "y_embedding",
- "emb.resolution",
- "caption_projection"
- ],
- "file_256": [
- "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
- ],
- "layer_b3": [
- "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
- ],
- "layer_256": [
- "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-xl-2-1024-ms"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- },
- "tasks": [
- "PixArtAlphaPipeline"
- ]
- }
- },
- "info.dit.pixart-sigma-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtSigmaPipeline"
- }
- },
- "identifiers": [
- "adaln_single",
- "scale_shift_table"
- ],
- "file_256": [
- "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
- "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
- ],
- "layer_b3": [
- "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
- "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
- ],
- "layer_256": [
- "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
- "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-sigma-xl-2-1024-ms"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- },
- "tasks": [
- "PixArtAlphaPipeline",
- "PixArtSigmaPipeline"
- ]
- }
- },
- "info.dit.qwen-image": {
- "*": {
- "repo": "Qwen/Qwen-Image",
- "pkg": {
- "0": {
- "diffusers": "QwenImageInpaintPipeline"
- }
- },
- "file_256": [
- "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
- ],
- "layer_b3": [
- "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
- ],
- "layer_256": [
- "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image"
- ],
- "transformer": [
- "QwenImageTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.qwen-image-edit": {
- "*": {
- "repo": "Qwen/Qwen-Image-Edit",
- "pkg": {
- "0": {
- "diffusers": "QwenImageEditPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-edit"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "QwenImageTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.sana-1024px-bf16": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaPipeline",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 4.5,
- "num_inference_steps": 20
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
- ],
- "layer_b3": [
- "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
- ],
- "layer_256": [
- "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-1024px-bf16"
- ],
- "text_encoder": [
- "Gemma2PreTrainedModel"
- ],
- "vae": [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "transformer": [
- "SanaTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- },
- "tasks": [
- "SanaPAGPipeline",
- "SanaPipeline"
- ]
- }
- },
- "info.controlnet.sana-1024px-controlnet": {
- "diffusers": {
- "repo": "ishan24/Sana_600M_1024px_ControlNetPlus_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaControlNetPipeline"
- }
- }
- }
- },
- "info.dit.sana-sprint-1024px": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaSprintPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-sprint-1024px"
- ],
- "text_encoder": [
- "Gemma2PreTrainedModel"
- ],
- "vae": [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "transformer": [
- "SanaTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- },
- "tasks": [
- "SanaPAGPipeline",
- "SanaPipeline"
- ]
- }
- },
- "info.unet.shap-e": {
- "*": {
- "repo": "openai/shap-e",
- "pkg": {
- "0": {
- "diffusers": "ShapEPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 64,
- "size": 256,
- "guidance_scale": 15
- }
- }
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "shap-e"
- ],
- "scheduler": [
- "ops.scheduler.heun",
- "discrete"
- ],
- "shap_e_renderer": [
- "ShapERenderer"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-t2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2Pipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-t2v-720p"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-df-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-DF-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-df-720p"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-i2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-I2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2ImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-i2v-720p"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "image_processor": [
- "CLIPProcessor"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.stable-audio-open-1": {
- "*": {
- "repo": "stabilityai/stable-audio-open-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableAudioPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_end_in_s": 10,
- "num_waveforms_per_prompt": 3
- }
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.oobleck",
- "stable-audio-open-1"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "projection_model": [
- "StableAudioProjectionModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-audio-open-1"
- ],
- "transformer": [
- "StableAudioDiTModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
- }
- },
- "info.unet.stable-cascade": {
- "prior": {
- "repo": "stabilityai/stable-cascade-prior",
- "pkg": {
- "0": {
- "diffusers": "StableCascadePriorPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "negative_prompt": "",
- "num_images_per_prompt": 1,
- "num_inference_steps": 20,
- "guidance_scale": 4.0,
- "width": 1024,
- "height": 1024
- }
- }
- },
- "file_256": [
- "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
- "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
- "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
- "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
- "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
- "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
- ],
- "layer_b3": [
- "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
- "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
- "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
- "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
- "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
- "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
- ],
- "layer_256": [
- "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
- "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
- "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
- "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
- "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
- "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
- ],
- "identifiers": [
- "down_blocks.0.2.kv_mapper",
- "previewer",
- "backbone"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-cascade"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "prior": [
- "StableCascadeUNet"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ]
- }
- },
- "decoder": {
- "repo": "stabilityai/stable-cascade",
- "pkg": {
- "0": {
- "diffusers": "StableCascadeDecoderPipeline",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 0.0,
- "output_type": "pil",
- "num_inference_steps": 10
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
- "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
- "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
- "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
- "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
- ],
- "layer_b3": [
- "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
- "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
- "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
- "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
- "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
- ],
- "layer_256": [
- "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
- "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
- "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
- "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
- "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
- ],
- "identifiers": [
- "0.2.channelwise",
- "clip_mapper.bias",
- ".12.self_attn.k_proj.weight"
- ],
- "pipe_names": {
- "decoder": [
- "StableCascadeUNet"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-cascade"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ],
- "vqgan": [
- "PaellaVQModel"
- ]
- },
- "tasks": [
- "StableCascadeCombinedPipeline",
- "StableCascadeDecoderPipeline"
- ]
- }
- },
- "info.unet.stable-diffusion-v1-5": {
- "*": {
- "repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionPipeline"
- }
- },
- "identifiers": [
- "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
- ],
- "file_256": [
- "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
- "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
- "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
- "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
- "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
- "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
- "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
- ],
- "layer_b3": [
- "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
- "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
- "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
- "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
- ],
- "layer_256": [
- "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
- "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
- "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-v1-5"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ]
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.unet.stable-unclip-2-1-l": {
- "*": {
- "repo": "fusing/stable-unclip-2-1-l",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPPipeline"
- }
- },
- "pipe_names": {
- "prior_tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "prior_text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "prior": [
- "PriorTransformer"
- ],
- "prior_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_normalizer": [
- "StableUnCLIPImageNormalizer"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.unet.stable-diffusion-2-1-unclip": {
- "*": {
- "repo": "stabilityai/stable-diffusion-2-1-unclip-small",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPImg2ImgPipeline"
- }
- },
- "pipe_names": {
- "feature_extractor": [
- "CLIPImageProcessor"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "image_normalizer": [
- "StableUnCLIPImageNormalizer"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-2-1-unclip"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.dit.stable-diffusion-3": {
- "*": {
- "repo": "stabilityai/stable-diffusion-3.5-medium",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusion3Pipeline",
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "model.diffusion_model.joint_blocks.",
- "transformer_blocks.21.norm1_context.linear.weight",
- "transformer_blocks.31.norm1_context.linear.weight",
- "blocks.11.ff.net.2.weight"
- ],
- "file_256": [
- "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
- "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
- ],
- "layer_b3": [
- "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
- "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
- ],
- "layer_256": [
- "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
- "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
- ],
- "pipe_names": {
- "transformer": [
- "SD3Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.tae",
- "stable-diffusion-3"
- ],
- "text_encoder": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "text_encoder_2": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "text_encoder_3": [
- "T5EncoderModel"
- ],
- "tokenizer_3": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "image_encoder": [
- "SiglipVisionModel"
- ],
- "feature_extractor": [
- "SiglipImageProcessor"
- ]
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline"
- ]
- },
- "stable-diffusion-3-turbo": {
- "repo": "tensorart/stable-diffusion-3.5-medium-turbo",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.5,
- "height": 1024,
- "width": 768
- }
- }
- },
- "file_256": [
- "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246",
- "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d",
- "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b",
- "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e"
- ],
- "layer_256": [
- "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ],
- "layer_b3": [
- "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ]
- }
- },
- "info.unet.gligen-1-4-inpainting-text-box": {
- "*": {
- "repo": "masterful/gligen-1-4-inpainting-text-box",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENPipeline"
- }
- }
- }
- },
- "info.unet.gligen-inpainting-text-image": {
- "*": {
- "repo": "anhnct/Gligen_Inpainting_Text_Image",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENTextImagePipeline"
- }
- }
- }
- },
- "info.unet.ldm3d-4c": {
- "*": {
- "repo": "Intel/ldm3d-4c",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionLDM3DPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ldm3d-4c"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.unet.stable-diffusion-xl-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "denoising_end": 0.8,
- "num_inference_steps": 40,
- "output_type": "latent",
- "safety_checker": false,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
- "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
- "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
- "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
- ],
- "layer_256": [
- "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
- "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
- "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
- ],
- "layer_b3": [
- "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
- "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
- "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
- ],
- "identifiers": [
- "logit_scale",
- "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
- "add_embedding.linear_2.bias"
- ],
- "pipe_names": {
- "vae": [
- "info.vae.eq",
- "info.vae.ms-lc-eq",
- "stable-diffusion-xl-1"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "text_encoder_2": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-1"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-1"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- },
- "pony-diffusion": {
- "file_256": [
- "67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"
- ],
- "layer_256": [
- "465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"
- ],
- "layer_b3": [
- "bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"
- ]
- },
- "pony-diffusion-turbo": {
- "file_256": [
- "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587",
- "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2"
- ],
- "layer_256": [
- "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
- "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52"
- ],
- "layer_b3": [
- "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
- "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc"
- ]
- },
- "animagine-xl-4": {
- "repo": "cagliostrolab/animagine-xl-4.0",
- "file_256": [
- "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f",
- "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac",
- "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766",
- "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0"
- ],
- "layer_256": [
- "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
- "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
- "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
- "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65"
- ],
- "layer_b3": [
- "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
- "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
- "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
- "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447"
- ]
- },
- "illustrious-xl-v2": {
- "repo": "OnomaAIResearch/Illustrious-XL-v2.0",
- "file_256": [
- "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331",
- "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c",
- "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9",
- "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce",
- "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd"
- ],
- "layer_256": [
- "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
- "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
- "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
- "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
- "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10"
- ],
- "layer_b3": [
- "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
- "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
- "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
- "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
- "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c"
- ]
- },
- "playground-v2---aesthetic": {
- "repo": "playgroundai/playground-v2.5-1024px-aesthetic",
- "pkg": {
- "0": {
- "diffusers": "DiffusionPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 50,
- "guidance_scale": 3
- }
- }
- },
- "file_256": [
- "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8",
- "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992",
- "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a",
- "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864",
- "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4",
- "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a"
- ],
- "layer_256": [
- "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
- "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843",
- "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
- "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e",
- "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c"
- ],
- "layer_b3": [
- "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b",
- "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289",
- "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc",
- "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
- "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
- "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
- ],
- "pipe_names": {}
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-Vega",
- "file_256": [
- "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce",
- "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1",
- "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2"
- ],
- "layer_256": [
- "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
- "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
- "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741"
- ],
- "layer_b3": [
- "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
- "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
- "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1"
- ]
- },
- "ssd": {
- "repo": "segmind/SSD-1B",
- "file_256": [
- "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd",
- "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d",
- "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
- "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca",
- "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a"
- ],
- "layer_256": [
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ],
- "layer_b3": [
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4",
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ]
- }
- },
- "info.unet.stable-diffusion-xl-refiner-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-refiner-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLImg2ImgPipeline"
- },
- "1": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "num_inference_steps": 40,
- "denoising_end": 0.8
- }
- }
- },
- "identifiers": [
- "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
- ],
- "file_256": [
- "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
- "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
- "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
- ],
- "layer_b3": [
- "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
- "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
- "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
- ],
- "layer_256": [
- "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
- "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
- "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "text_encoder_2": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- },
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.unet.sdxl-pix2pix-768": {
- "*": {
- "repo": "diffusers/sdxl-instructpix2pix-768",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLInstructPix2PixPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "text_encoder_2": [
- "CLIPTextModelWithProjection"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- },
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ]
- }
- },
- "info.unet.stable-video-diffusion-img2vid-xt": {
- "*": {
- "repo": "stabilityai/stable-video-diffusion-img2vid-xt",
- "pkg": {
- "0": {
- "diffusers": "StableVideoDiffusionPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "image_encoder": [
- "CLIPVisionModelWithProjection"
- ],
- "unet": [
- "UNetSpatioTemporalConditionModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- }
- }
- },
- "info.dit.visualclozepipeline-384": {
- "*": {
- "repo": "VisualCloze/VisualClozePipeline-384",
- "pkg": {
- "0": {
- "diffusers": "VisualClozeGenerationPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.wan2-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
- "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
- "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
- "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
- "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
- "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
- "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
- "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
- ],
- "layer_b3": [
- "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
- "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
- "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
- "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
- "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
- "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
- "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
- ],
- "layer_256": [
- "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
- "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
- "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
- "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
- "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
- "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
- "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan2-t2v"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.wan2-i2v-480p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanImageToVideoPipeline"
- }
- },
- "file_256": [
- "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
- "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
- "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
- "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
- "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
- "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
- "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
- "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
- "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
- ],
- "layer_b3": [
- "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
- "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
- "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
- "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
- "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
- ],
- "layer_256": [
- "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
- "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
- "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
- "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
- "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan2-i2v-480p"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "vae": [
- "info.vae.wan",
- "wan2-i2v-480p"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "image_processor": [
- "CLIPImageProcessor"
- ],
- "image_encoder": [
- "CLIPVisionModel"
- ],
- "transformer": [
- "WanTransformer3DModel"
- ],
- "transformer_2": [
- "WanTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan21-vace": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanVACEPipeline"
- }
- },
- "file_256": [
- "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
- "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
- "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
- "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
- "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
- ],
- "layer_b3": [
- "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
- "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
- "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
- "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
- ],
- "layer_256": [
- "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
- "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
- "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
- "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-vace"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "transformer": [
- "WanVACETransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.wan21-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanVideoToVideoPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-t2v"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "transformer": [
- "WanTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.unet.wuerstchen": {
- "prior": {
- "repo": "warp-ai/wuerstchen-prior",
- "pkg": {
- "0": {
- "diffusers": "WuerstchenPriorPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wuerstchen"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "prior": [
- "WuerstchenPrior"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ]
- },
- "tasks": [
- "WuerstchenCombinedPipeline",
- "WuerstchenDecoderPipeline"
- ]
- },
- "decoder": {
- "repo": "warp-ai/wuerstchen",
- "pkg": {
- "0": {
- "diffusers": "WuerstchenDecoderPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wuerstchen"
- ],
- "text_encoder": [
- "CLIPTextModel"
- ],
- "decoder": [
- "WuerstchenDiffNeXt"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ],
- "vqgan": [
- "PaellaVQModel"
- ]
- },
- "tasks": [
- "WuerstchenCombinedPipeline",
- "WuerstchenDecoderPipeline"
- ]
- }
- },
- "info.encoder.tokenizer": {
- "powermoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "persimmon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "wav2vec2-conformer-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "xclip-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "vilt-b32-mlm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "grounding-dino": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "mpnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer"
- },
- "1": {
- "transformers": "transformers.models.mpnet.tokenization_mpnet_fast.MPNetTokenizerFast"
- }
- }
- },
- "vit-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "starcoder2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "glm-4v-thinking": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "jamba-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "lilt-roberta-en": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3_fast.LayoutLMv3TokenizerFast"
- }
- }
- },
- "aria-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "helium": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "mvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mvp.tokenization_mvp.MvpTokenizer"
- },
- "1": {
- "transformers": "transformers.models.mvp.tokenization_mvp_fast.MvpTokenizerFast"
- }
- }
- },
- "wmt19-en-ru": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
- }
- }
- },
- "vip-llava-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "owlv2-patch16": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "falcon": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "llama-3-vision": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "hf-moshiko": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "layoutlmv2-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2_fast.LayoutLMv2TokenizerFast"
- }
- }
- },
- "canine-s": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.canine.tokenization_canine.CanineTokenizer"
- }
- }
- },
- "idefics3-llama3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "owlvit-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "phi-3-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "deberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta.tokenization_deberta.DebertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.deberta.tokenization_deberta_fast.DebertaTokenizerFast"
- }
- }
- },
- "gpt2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_sw3.tokenization_gpt_sw3.GPTSw3Tokenizer"
- }
- }
- },
- "xlm-mlm-en-2048": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm.tokenization_xlm.XLMTokenizer"
- }
- }
- },
- "llava": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "udop": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.udop.tokenization_udop.UdopTokenizer"
- },
- "1": {
- "transformers": "transformers.models.udop.tokenization_udop_fast.UdopTokenizerFast"
- }
- }
- },
- "moe-active-shared-experts": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "layoutlm-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlm.tokenization_layoutlm.LayoutLMTokenizer"
- },
- "1": {
- "transformers": "transformers.models.layoutlm.tokenization_layoutlm_fast.LayoutLMTokenizerFast"
- }
- }
- },
- "align": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "retribert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deprecated.retribert.tokenization_retribert.RetriBertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.deprecated.retribert.tokenization_retribert_fast.RetriBertTokenizerFast"
- }
- }
- },
- "ctrl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer"
- }
- }
- },
- "smollm3": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "layoutlmv3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3_fast.LayoutLMv3TokenizerFast"
- }
- }
- },
- "openai-gpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
- },
- "1": {
- "transformers": "transformers.models.openai.tokenization_openai_fast.OpenAIGPTTokenizerFast"
- }
- }
- },
- "blip-flan-t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "siglip-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.siglip.tokenization_siglip.SiglipTokenizer"
- }
- }
- },
- "omdet-turbo-swin-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "funnel": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.funnel.tokenization_funnel.FunnelTokenizer"
- },
- "1": {
- "transformers": "transformers.models.funnel.tokenization_funnel_fast.FunnelTokenizerFast"
- }
- }
- },
- "qwen2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2_fast.Qwen2TokenizerFast"
- }
- }
- },
- "aimv2-patch14-224-lit": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "data2vec-text": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "efficient-mlm-m0-0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "gemma2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast"
- }
- }
- },
- "yoso-4096": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.albert.tokenization_albert_fast.AlbertTokenizerFast"
- }
- }
- },
- "llama-4-scout-16e": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "olmo-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "plbart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.plbart.tokenization_plbart.PLBartTokenizer"
- }
- }
- },
- "mixtral-8x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "bitnet-b18-4t": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "lxmert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.lxmert.tokenization_lxmert.LxmertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.lxmert.tokenization_lxmert_fast.LxmertTokenizerFast"
- }
- }
- },
- "chameleon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "albert-xx-v2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.albert.tokenization_albert_fast.AlbertTokenizerFast"
- }
- }
- },
- "ernie-3-zh": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "clvp-dev": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clvp.tokenization_clvp.ClvpTokenizer"
- }
- }
- },
- "esm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
- }
- }
- },
- "rembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.rembert.tokenization_rembert.RemBertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.rembert.tokenization_rembert_fast.RemBertTokenizerFast"
- }
- }
- },
- "c4ai-command-r-v01": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere_fast.CohereTokenizerFast"
- }
- }
- },
- "rwkv-4-pile": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "umt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.t5.tokenization_t5_fast.T5TokenizerFast"
- }
- }
- },
- "gemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast"
- }
- }
- },
- "reformer-crime-and-punishment": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.reformer.tokenization_reformer.ReformerTokenizer"
- },
- "1": {
- "transformers": "transformers.models.reformer.tokenization_reformer_fast.ReformerTokenizerFast"
- }
- }
- },
- "wav2vec2-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "nezha-cn": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "git": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "kosmos-2-patch14-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta_fast.XLMRobertaTokenizerFast"
- }
- }
- },
- "olmoe-0924": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "phi-1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.codegen.tokenization_codegen.CodeGenTokenizer"
- },
- "1": {
- "transformers": "transformers.models.codegen.tokenization_codegen_fast.CodeGenTokenizerFast"
- }
- }
- },
- "codegen-mono": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.codegen.tokenization_codegen.CodeGenTokenizer"
- },
- "1": {
- "transformers": "transformers.models.codegen.tokenization_codegen_fast.CodeGenTokenizerFast"
- }
- }
- },
- "data2vec-audio-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "musicgen": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.t5.tokenization_t5_fast.T5TokenizerFast"
- }
- }
- },
- "olmo2-1124-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "splinter": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.splinter.tokenization_splinter.SplinterTokenizer"
- },
- "1": {
- "transformers": "transformers.models.splinter.tokenization_splinter_fast.SplinterTokenizerFast"
- }
- }
- },
- "luke": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.luke.tokenization_luke.LukeTokenizer"
- }
- }
- },
- "glm-4-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "long-t5-local": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.t5.tokenization_t5_fast.T5TokenizerFast"
- }
- }
- },
- "nemotron-3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "wav2vec2-bert-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "led-16384": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.led.tokenization_led.LEDTokenizer"
- },
- "1": {
- "transformers": "transformers.models.led.tokenization_led_fast.LEDTokenizerFast"
- }
- }
- },
- "idefics2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "squeezebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.squeezebert.tokenization_squeezebert.SqueezeBertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.squeezebert.tokenization_squeezebert_fast.SqueezeBertTokenizerFast"
- }
- }
- },
- "mms-tts-eng": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.vits.tokenization_vits.VitsTokenizer"
- }
- }
- },
- "mt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mt5.tokenization_mt5.MT5Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.mt5.tokenization_mt5_fast.MT5TokenizerFast"
- }
- }
- },
- "paligemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "speecht5-asr": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.speecht5.tokenization_speecht5.SpeechT5Tokenizer"
- }
- }
- },
- "conv-bert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.convbert.tokenization_convbert.ConvBertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.convbert.tokenization_convbert_fast.ConvBertTokenizerFast"
- }
- }
- },
- "mamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "c4ai-command-r-12-2024": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere_fast.CohereTokenizerFast"
- }
- }
- },
- "xmod": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta_fast.XLMRobertaTokenizerFast"
- }
- }
- },
- "bart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bart.tokenization_bart.BartTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bart.tokenization_bart_fast.BartTokenizerFast"
- }
- }
- },
- "voxtral-2507": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "pixtral": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "whisper": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.whisper.tokenization_whisper.WhisperTokenizer"
- },
- "1": {
- "transformers": "transformers.models.whisper.tokenization_whisper_fast.WhisperTokenizerFast"
- }
- }
- },
- "blip-vqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "flaubert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
- }
- }
- },
- "transfo-xl-wt103": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deprecated.transfo_xl.tokenization_transfo_xl.TransfoXLTokenizer"
- }
- }
- },
- "moonshine": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "tvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "biogpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.biogpt.tokenization_biogpt.BioGptTokenizer"
- }
- }
- },
- "opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "blip2-opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "gptsan-japanese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deprecated.gptsan_japanese.tokenization_gptsan_japanese.GPTSanJapaneseTokenizer"
- }
- }
- },
- "dpr-question-encoder-single-nq": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.dpr.tokenization_dpr.DPRQuestionEncoderTokenizer"
- },
- "1": {
- "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
- }
- }
- },
- "mbart-cc25": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mbart.tokenization_mbart.MBartTokenizer"
- },
- "1": {
- "transformers": "transformers.models.mbart.tokenization_mbart_fast.MBartTokenizerFast"
- }
- }
- },
- "idefics": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "diffllama-handcut": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "deberta-v2-x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2.DebertaV2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2_fast.DebertaV2TokenizerFast"
- }
- }
- },
- "granite": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "internvl3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2_fast.Qwen2TokenizerFast"
- }
- }
- },
- "fnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
- },
- "1": {
- "transformers": "transformers.models.fnet.tokenization_fnet_fast.FNetTokenizerFast"
- }
- }
- },
- "blenderbot": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer"
- }
- }
- },
- "clipseg-rd64": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "jetmoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "mobilebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mobilebert.tokenization_mobilebert.MobileBertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.mobilebert.tokenization_mobilebert_fast.MobileBertTokenizerFast"
- }
- }
- },
- "groupvit-gcc-yfcc": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- },
- "1": {
- "transformers": "transformers.models.clip.tokenization_clip_fast.CLIPTokenizerFast"
- }
- }
- },
- "ibert-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "nystromformer-512": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.albert.tokenization_albert_fast.AlbertTokenizerFast"
- }
- }
- },
- "gpt-j": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "stablelm-4e1t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "hubert-ls960": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "mpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "xlstm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox_fast.GPTNeoXTokenizerFast"
- }
- }
- },
- "xglm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xglm.tokenization_xglm.XGLMTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xglm.tokenization_xglm_fast.XGLMTokenizerFast"
- }
- }
- },
- "afm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "clap-htsat-fused": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "zamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "xlnet-cased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xlnet.tokenization_xlnet_fast.XLNetTokenizerFast"
- }
- }
- },
- "nllb-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.nllb.tokenization_nllb.NllbTokenizer"
- },
- "1": {
- "transformers": "transformers.models.nllb.tokenization_nllb_fast.NllbTokenizerFast"
- }
- }
- },
- "jukebox-lyrics": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deprecated.jukebox.tokenization_jukebox.JukeboxTokenizer"
- }
- }
- },
- "mistral-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "xlm-roberta-xl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta_fast.XLMRobertaTokenizerFast"
- }
- }
- },
- "t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.t5.tokenization_t5_fast.T5TokenizerFast"
- }
- }
- },
- "mgp-str": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mgp_str.tokenization_mgp_str.MgpstrTokenizer"
- }
- }
- },
- "opus-mt-en-de": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.marian.tokenization_marian.MarianTokenizer"
- }
- }
- },
- "glm-4-0414": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "mega-wikitext": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "janus": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "roformer-chinese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roformer.tokenization_roformer.RoFormerTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roformer.tokenization_roformer_fast.RoFormerTokenizerFast"
- }
- }
- },
- "qwen3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2_fast.Qwen2TokenizerFast"
- }
- }
- },
- "siglip2-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.gemma.tokenization_gemma_fast.GemmaTokenizerFast"
- }
- }
- },
- "mra-512-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "tapas-finetuned-sqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.tapas.tokenization_tapas.TapasTokenizer"
- }
- }
- },
- "zamba-v1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "bark": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "electra-discriminator": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.electra.tokenization_electra.ElectraTokenizer"
- },
- "1": {
- "transformers": "transformers.models.electra.tokenization_electra_fast.ElectraTokenizerFast"
- }
- }
- },
- "language-perceiver": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.perceiver.tokenization_perceiver.PerceiverTokenizer"
- }
- }
- },
- "mm-grounding-dino-o365v1-goldg-v3det": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "camembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.camembert.tokenization_camembert.CamembertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.camembert.tokenization_camembert_fast.CamembertTokenizerFast"
- }
- }
- },
- "exaone-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "bloom": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bloom.tokenization_bloom_fast.BloomTokenizerFast"
- }
- }
- },
- "bros-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "megatron-bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- },
- "1": {
- "transformers": "transformers.models.bert.tokenization_bert_fast.BertTokenizerFast"
- }
- }
- },
- "bert-for-seq-generation-l-24-bbc-encoder": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert_generation.tokenization_bert_generation.BertGenerationTokenizer"
- }
- }
- },
- "modernbert": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_fast.PreTrainedTokenizerFast"
- }
- }
- },
- "max-text-01-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- },
- "1": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast"
- }
- }
- },
- "bridgetower": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast"
- }
- }
- },
- "phi-3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.llama.tokenization_llama_fast.LlamaTokenizerFast"
- }
- }
- },
- "xlm-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- },
- "1": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta_fast.XLMRobertaTokenizerFast"
- }
- }
- }
- },
- "info.moe.powermoe": {
- "*": {
- "repo": "ibm-research/PowerMoE-3b",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeModel"
- }
- },
- "tasks": [
- "GraniteMoeForCausalLM",
- "GraniteMoeModel",
- "GraniteMoePreTrainedModel"
- ]
- }
- },
- "info.aet.bert-uncased": {
- "*": {
- "repo": "google-bert/bert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BertModel"
- }
- },
- "file_256": [
- "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
- ],
- "layer_b3": [
- "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
- ],
- "layer_256": [
- "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
- ],
- "tasks": [
- "BertForMaskedLM",
- "BertForMultipleChoice",
- "BertForNextSentencePrediction",
- "BertForPreTraining",
- "BertForQuestionAnswering",
- "BertForSequenceClassification",
- "BertForTokenClassification",
- "BertLayer",
- "BertLMHeadModel",
- "BertModel",
- "BertPreTrainedModel"
- ]
- }
- },
- "info.art.persimmon": {
- "*": {
- "repo": "adept/persimmon-8b-base",
- "pkg": {
- "0": {
- "transformers": "PersimmonModel"
- }
- },
- "tasks": [
- "PersimmonForCausalLM",
- "PersimmonModel",
- "PersimmonPreTrainedModel",
- "PersimmonForSequenceClassification",
- "PersimmonForTokenClassification"
- ]
- }
- },
- "info.aet.s2t-librispeech-asr": {
- "*": {
- "repo": "facebook/s2t-small-librispeech-asr",
- "pkg": {
- "0": {
- "transformers": "Speech2TextModel"
- }
- },
- "tasks": [
- "Speech2TextForConditionalGeneration",
- "Speech2TextModel",
- "Speech2TextPreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-conformer-rel-pos": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rel-pos-large",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerModel"
- }
- },
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.gan.univnet-dev": {
- "*": {
- "repo": "dg845/univnet-dev",
- "pkg": {
- "0": {
- "transformers": "UnivNetModel"
- }
- },
- "tasks": [
- "UnivNetModel"
- ]
- }
- },
- "info.vit.xclip-patch32": {
- "*": {
- "repo": "microsoft/xclip-base-patch32",
- "pkg": {
- "0": {
- "transformers": "XCLIPModel"
- }
- },
- "tasks": [
- "XCLIPModel",
- "XCLIPPreTrainedModel",
- "XCLIPTextModel",
- "XCLIPVisionModel"
- ]
- }
- },
- "info.art.gemma3-text": {
- "*": {
- "repo": "google/gemma-3-12b-it",
- "pkg": {
- "0": {
- "transformers": "Gemma3TextModel"
- }
- },
- "tasks": [
- "Gemma3PreTrainedModel",
- "Gemma3TextModel",
- "Gemma3ForCausalLM",
- "Gemma3ForConditionalGeneration",
- "Gemma3Model",
- "Gemma3ForSequenceClassification"
- ]
- }
- },
- "info.vit.vilt-b32-mlm": {
- "*": {
- "repo": "dandelin/vilt-b32-mlm",
- "pkg": {
- "0": {
- "transformers": "ViltModel"
- }
- },
- "tasks": [
- "ViltForImageAndTextRetrieval",
- "ViltForImagesAndTextClassification",
- "ViltForTokenClassification",
- "ViltForMaskedLM",
- "ViltForQuestionAnswering",
- "ViltLayer",
- "ViltModel",
- "ViltPreTrainedModel"
- ]
- }
- },
- "info.stst.switch-8": {
- "*": {
- "repo": "google/switch-base-8",
- "pkg": {
- "0": {
- "transformers": "SwitchTransformersModel"
- }
- },
- "tasks": [
- "SwitchTransformersEncoderModel",
- "SwitchTransformersForConditionalGeneration",
- "SwitchTransformersModel",
- "SwitchTransformersPreTrainedModel",
- "SwitchTransformersTop1Router",
- "SwitchTransformersSparseMLP"
- ]
- }
- },
- "info.detr.grounding-dino": {
- "*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
- "pkg": {
- "0": {
- "transformers": "GroundingDinoModel"
- }
- },
- "tasks": [
- "GroundingDinoForObjectDetection",
- "GroundingDinoModel",
- "GroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.art.mpnet": {
- "*": {
- "repo": "microsoft/mpnet-base",
- "pkg": {
- "0": {
- "transformers": "MPNetModel"
- }
- },
- "tasks": [
- "MPNetForMaskedLM",
- "MPNetForMultipleChoice",
- "MPNetForQuestionAnswering",
- "MPNetForSequenceClassification",
- "MPNetForTokenClassification",
- "MPNetLayer",
- "MPNetModel",
- "MPNetPreTrainedModel"
- ]
- }
- },
- "info.vit.chinese-clip-vit-patch16": {
- "*": {
- "repo": "OFA-Sys/chinese-clip-vit-base-patch16",
- "pkg": {
- "0": {
- "transformers": "ChineseCLIPModel"
- }
- },
- "tasks": [
- "ChineseCLIPModel",
- "ChineseCLIPPreTrainedModel",
- "ChineseCLIPTextModel",
- "ChineseCLIPVisionModel"
- ]
- }
- },
- "info.vit.hiera-224": {
- "*": {
- "repo": "facebook/hiera-base-224-hf",
- "pkg": {
- "0": {
- "transformers": "HieraModel"
- }
- },
- "tasks": [
- "HieraForImageClassification",
- "HieraForPreTraining",
- "HieraBackbone",
- "HieraModel",
- "HieraPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-patch16-224": {
- "*": {
- "repo": "google/vit-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "ViTModel"
- }
- },
- "tasks": [
- "ViTForImageClassification",
- "ViTForMaskedImageModeling",
- "ViTModel",
- "ViTPreTrainedModel"
- ]
- }
- },
- "info.vit.vivit16x2-kinetics400": {
- "*": {
- "repo": "google/vivit-b-16x2-kinetics400",
- "pkg": {
- "0": {
- "transformers": "VivitModel"
- }
- },
- "tasks": [
- "VivitModel",
- "VivitPreTrainedModel",
- "VivitForVideoClassification"
- ]
- }
- },
- "info.art.starcoder2": {
- "*": {
- "repo": "bigcode/starcoder2-7b",
- "pkg": {
- "0": {
- "transformers": "Starcoder2Model"
- }
- },
- "tasks": [
- "Starcoder2ForCausalLM",
- "Starcoder2Model",
- "Starcoder2PreTrainedModel",
- "Starcoder2ForSequenceClassification",
- "Starcoder2ForTokenClassification"
- ]
- }
- },
- "info.vit.glm-4v-thinking": {
- "*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
- "pkg": {
- "0": {
- "transformers": "Glm4vModel"
- }
- },
- "tasks": [
- "Glm4vForConditionalGeneration",
- "Glm4vModel",
- "Glm4vPreTrainedModel",
- "Glm4vTextModel"
- ]
- }
- },
- "info.ssm.jamba-v0": {
- "*": {
- "repo": "ai21labs/Jamba-v0.1",
- "pkg": {
- "0": {
- "transformers": "JambaModel"
- }
- },
- "tasks": [
- "JambaForCausalLM",
- "JambaForSequenceClassification",
- "JambaModel",
- "JambaPreTrainedModel"
- ]
- }
- },
- "info.aet.lilt-roberta-en": {
- "*": {
- "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
- "pkg": {
- "0": {
- "transformers": "LiltModel"
- }
- },
- "tasks": [
- "LiltForQuestionAnswering",
- "LiltForSequenceClassification",
- "LiltForTokenClassification",
- "LiltModel",
- "LiltPreTrainedModel"
- ]
- }
- },
- "info.vit.video-llava-hf": {
- "*": {
- "repo": "LanguageBind/Video-LLaVA-7B-hf",
- "pkg": {
- "0": {
- "transformers": "VideoLlavaModel"
- }
- },
- "tasks": [
- "VideoLlavaPreTrainedModel",
- "VideoLlavaModel",
- "VideoLlavaForConditionalGeneration"
- ]
- }
- },
- "info.vit.aria-chat": {
- "*": {
- "repo": "rhymes-ai/Aria-Chat",
- "pkg": {
- "0": {
- "transformers": "AriaModel"
- }
- },
- "tasks": [
- "AriaForConditionalGeneration",
- "AriaPreTrainedModel",
- "AriaTextPreTrainedModel",
- "AriaTextModel",
- "AriaModel",
- "AriaTextForCausalLM"
- ]
- }
- },
- "info.art.open-llama-v1": {
- "*": {
- "repo": "openlm-research/open_llama_3b",
- "pkg": {
- "0": {
- "transformers": "OpenLlamaModel"
- }
- },
- "tasks": [
- "OpenLlamaPreTrainedModel",
- "OpenLlamaModel",
- "OpenLlamaForCausalLM",
- "OpenLlamaForSequenceClassification"
- ]
- }
- },
- "info.art.helium": {
- "*": {
- "repo": "kyutai/helium-1-2b",
- "pkg": {
- "0": {
- "transformers": "HeliumModel"
- }
- },
- "tasks": [
- "HeliumPreTrainedModel",
- "HeliumModel",
- "HeliumForCausalLM",
- "HeliumForSequenceClassification",
- "HeliumForTokenClassification"
- ]
- }
- },
- "info.stst.autoformer-tourism-monthly": {
- "*": {
- "repo": "huggingface/autoformer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "AutoformerModel"
- }
- },
- "tasks": [
- "AutoformerForPrediction",
- "AutoformerModel",
- "AutoformerPreTrainedModel"
- ]
- }
- },
- "info.stst.mvp": {
- "*": {
- "repo": "RUCAIBox/mvp",
- "pkg": {
- "0": {
- "transformers": "MvpModel"
- }
- },
- "tasks": [
- "MvpForCausalLM",
- "MvpForConditionalGeneration",
- "MvpForQuestionAnswering",
- "MvpForSequenceClassification",
- "MvpModel",
- "MvpPreTrainedModel"
- ]
- }
- },
- "info.vit.focalnet": {
- "*": {
- "repo": "microsoft/focalnet-tiny",
- "pkg": {
- "0": {
- "transformers": "FocalNetModel"
- }
- },
- "tasks": [
- "FocalNetForImageClassification",
- "FocalNetForMaskedImageModeling",
- "FocalNetBackbone",
- "FocalNetModel",
- "FocalNetPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v1-1--224": {
- "*": {
- "repo": "google/mobilenet_v1_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV1Model"
- }
- },
- "tasks": [
- "MobileNetV1ForImageClassification",
- "MobileNetV1Model",
- "MobileNetV1PreTrainedModel"
- ]
- }
- },
- "info.stst.wmt19-en-ru": {
- "*": {
- "repo": "facebook/wmt19-en-ru",
- "pkg": {
- "0": {
- "transformers": "FSMTModel"
- }
- },
- "tasks": [
- "FSMTForConditionalGeneration",
- "FSMTModel",
- "PretrainedFSMTModel"
- ]
- }
- },
- "info.vit.vip-llava-hf": {
- "*": {
- "repo": "ybelkada/vip-llava-7b-hf",
- "pkg": {
- "0": {
- "transformers": "VipLlavaModel"
- }
- },
- "tasks": [
- "VipLlavaModel",
- "VipLlavaForConditionalGeneration",
- "VipLlavaPreTrainedModel"
- ]
- }
- },
- "info.vit.owlv2-patch16": {
- "*": {
- "repo": "google/owlv2-base-patch16",
- "pkg": {
- "0": {
- "transformers": "Owlv2Model"
- }
- },
- "tasks": [
- "Owlv2Model",
- "Owlv2PreTrainedModel",
- "Owlv2TextModel",
- "Owlv2VisionModel",
- "Owlv2ForObjectDetection"
- ]
- }
- },
- "info.vit.dinov2-with-registers": {
- "*": {
- "repo": "facebook/dinov2-with-registers-base",
- "pkg": {
- "0": {
- "transformers": "Dinov2WithRegistersModel"
- }
- },
- "tasks": [
- "Dinov2WithRegistersPreTrainedModel",
- "Dinov2WithRegistersModel",
- "Dinov2WithRegistersForImageClassification",
- "Dinov2WithRegistersBackbone"
- ]
- }
- },
- "info.detr.dpt": {
- "*": {
- "repo": "Intel/dpt-large",
- "pkg": {
- "0": {
- "transformers": "DPTModel"
- }
- },
- "tasks": [
- "DPTForDepthEstimation",
- "DPTForSemanticSegmentation",
- "DPTModel",
- "DPTPreTrainedModel"
- ]
- }
- },
- "info.ssm.falcon": {
- "*": {
- "repo": "tiiuae/falcon-7b",
- "pkg": {
- "0": {
- "transformers": "FalconModel"
- }
- },
- "tasks": [
- "FalconForCausalLM",
- "FalconModel",
- "FalconPreTrainedModel",
- "FalconForSequenceClassification",
- "FalconForTokenClassification",
- "FalconForQuestionAnswering"
- ]
- }
- },
- "info.vit.llama-3-vision": {
- "*": {
- "repo": "meta-llama/Llama-3.2-11B-Vision",
- "pkg": {
- "0": {
- "transformers": "MllamaModel"
- }
- },
- "tasks": [
- "MllamaForConditionalGeneration",
- "MllamaForCausalLM",
- "MllamaTextModel",
- "MllamaVisionModel",
- "MllamaPreTrainedModel",
- "MllamaModel"
- ]
- }
- },
- "info.art.hf-moshiko": {
- "*": {
- "repo": "kmhf/hf-moshiko",
- "pkg": {
- "0": {
- "transformers": "MoshiModel"
- }
- },
- "tasks": [
- "MoshiForCausalLM",
- "MoshiForConditionalGeneration",
- "MoshiModel",
- "MoshiPreTrainedModel"
- ]
- }
- },
- "info.art.layoutlmv2-uncased": {
- "*": {
- "repo": "microsoft/layoutlmv2-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv2Model"
- }
- },
- "tasks": [
- "LayoutLMv2ForQuestionAnswering",
- "LayoutLMv2ForSequenceClassification",
- "LayoutLMv2ForTokenClassification",
- "LayoutLMv2Layer",
- "LayoutLMv2Model",
- "LayoutLMv2PreTrainedModel"
- ]
- }
- },
- "info.vit.sam-vit-huge": {
- "*": {
- "repo": "facebook/sam-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamVisionModel"
- }
- },
- "tasks": [
- "SamVisionModel",
- "SamModel",
- "SamPreTrainedModel"
- ]
- }
- },
- "info.art.trajectory-transformer-halfcheetah-v2": {
- "*": {
- "repo": "CarlCochet/trajectory-transformer-halfcheetah-medium-v2",
- "pkg": {
- "0": {
- "transformers": "TrajectoryTransformerModel"
- }
- },
- "tasks": [
- "TrajectoryTransformerModel",
- "TrajectoryTransformerPreTrainedModel"
- ]
- }
- },
- "info.art.canine-s": {
- "*": {
- "repo": "google/canine-s",
- "pkg": {
- "0": {
- "transformers": "CanineModel"
- }
- },
- "tasks": [
- "CanineForMultipleChoice",
- "CanineForQuestionAnswering",
- "CanineForSequenceClassification",
- "CanineForTokenClassification",
- "CanineLayer",
- "CanineModel",
- "CaninePreTrainedModel"
- ]
- }
- },
- "info.gan.dac": {
- "*": {
- "repo": "descript/dac_16khz",
- "pkg": {
- "0": {
- "transformers": "DacModel"
- }
- },
- "tasks": [
- "DacModel",
- "DacPreTrainedModel"
- ]
- }
- },
- "info.vit.ast-finetuned-audioset-10-10-0593": {
- "*": {
- "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
- "pkg": {
- "0": {
- "transformers": "ASTModel"
- }
- },
- "tasks": [
- "ASTForAudioClassification",
- "ASTModel",
- "ASTPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics3-llama3": {
- "*": {
- "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
- "pkg": {
- "0": {
- "transformers": "Idefics3Model"
- }
- },
- "tasks": [
- "Idefics3ForConditionalGeneration",
- "Idefics3PreTrainedModel",
- "Idefics3Model",
- "Idefics3VisionTransformer"
- ]
- }
- },
- "info.detr.detr-resnet-50": {
- "*": {
- "repo": "facebook/detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DetrModel"
- }
- },
- "tasks": [
- "DetrForObjectDetection",
- "DetrForSegmentation",
- "DetrModel",
- "DetrPreTrainedModel"
- ]
- }
- },
- "info.vit.owlvit-patch32": {
- "*": {
- "repo": "google/owlvit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "OwlViTModel"
- }
- },
- "tasks": [
- "OwlViTModel",
- "OwlViTPreTrainedModel",
- "OwlViTTextModel",
- "OwlViTVisionModel",
- "OwlViTForObjectDetection"
- ]
- }
- },
- "info.moe.phi-3-moe": {
- "*": {
- "repo": "microsoft/Phi-3.5-MoE-instruct",
- "pkg": {
- "0": {
- "transformers": "PhimoeModel"
- }
- },
- "tasks": [
- "PhimoePreTrainedModel",
- "PhimoeModel",
- "PhimoeForCausalLM",
- "PhimoeForSequenceClassification"
- ]
- }
- },
- "info.art.deberta": {
- "*": {
- "repo": "microsoft/deberta-base",
- "pkg": {
- "0": {
- "transformers": "DebertaModel"
- }
- },
- "tasks": [
- "DebertaForMaskedLM",
- "DebertaForQuestionAnswering",
- "DebertaForSequenceClassification",
- "DebertaForTokenClassification",
- "DebertaModel",
- "DebertaPreTrainedModel"
- ]
- }
- },
- "info.art.gpt2": {
- "*": {
- "repo": "openai-community/gpt2",
- "pkg": {
- "0": {
- "transformers": "GPT2Model"
- }
- },
- "tasks": [
- "GPT2DoubleHeadsModel",
- "GPT2ForQuestionAnswering",
- "GPT2ForSequenceClassification",
- "GPT2ForTokenClassification",
- "GPT2LMHeadModel",
- "GPT2Model",
- "GPT2PreTrainedModel"
- ]
- }
- },
- "info.vit.sam-hq-vit-h": {
- "*": {
- "repo": "sushmanth/sam_hq_vit_h",
- "pkg": {
- "0": {
- "transformers": "SamHQModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.gan.dinat-in-224": {
- "*": {
- "repo": "shi-labs/dinat-mini-in1k-224",
- "pkg": {
- "0": {
- "transformers": "DinatModel"
- }
- },
- "tasks": [
- "DinatForImageClassification",
- "DinatModel",
- "DinatPreTrainedModel",
- "DinatBackbone"
- ]
- }
- },
- "info.vit.llava-v1-mistral-hf": {
- "*": {
- "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextModel"
- }
- },
- "tasks": [
- "LlavaNextForConditionalGeneration",
- "LlavaNextPreTrainedModel",
- "LlavaNextModel"
- ]
- }
- },
- "info.art.xlm-mlm-en-2048": {
- "*": {
- "repo": "FacebookAI/xlm-mlm-en-2048",
- "pkg": {
- "0": {
- "transformers": "XLMModel"
- }
- },
- "tasks": [
- "XLMForMultipleChoice",
- "XLMForQuestionAnswering",
- "XLMForQuestionAnsweringSimple",
- "XLMForSequenceClassification",
- "XLMForTokenClassification",
- "XLMModel",
- "XLMPreTrainedModel",
- "XLMWithLMHeadModel"
- ]
- }
- },
- "info.vit.llava": {
- "*": {
- "repo": "llava-hf/llava-9b",
- "pkg": {
- "0": {
- "transformers": "LlavaModel"
- }
- },
- "file_256": [
- "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
- ],
- "layer_b3": [
- "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
- ],
- "layer_256": [
- "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
- ],
- "tasks": [
- "LlavaForConditionalGeneration",
- "LlavaPreTrainedModel",
- "LlavaModel"
- ]
- }
- },
- "info.vit.udop": {
- "*": {
- "repo": "microsoft/udop-large",
- "pkg": {
- "0": {
- "transformers": "UdopModel"
- }
- },
- "tasks": [
- "UdopForConditionalGeneration",
- "UdopPreTrainedModel",
- "UdopModel",
- "UdopEncoderModel"
- ]
- }
- },
- "info.detr.rtdetr-r50vd": {
- "*": {
- "repo": "PekingU/rtdetr_r50vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrModel"
- }
- },
- "tasks": [
- "RTDetrForObjectDetection",
- "RTDetrModel",
- "RTDetrPreTrainedModel"
- ]
- }
- },
- "info.vit.qwen2-vl": {
- "*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen2_5_VLModel"
- }
- },
- "file_256": [
- "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
- "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
- "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
- "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
- ],
- "layer_b3": [
- "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
- "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
- "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
- "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
- ],
- "layer_256": [
- "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
- "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
- "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
- "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
- ],
- "tasks": [
- "Qwen2_5_VLForConditionalGeneration",
- "Qwen2_5_VLModel",
- "Qwen2_5_VLPreTrainedModel",
- "Qwen2_5_VLTextModel"
- ]
- }
- },
- "info.moe.moe-active-shared-experts": {
- "*": {
- "repo": "ibm-research/moe-7b-1b-active-shared-experts",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeSharedModel"
- }
- },
- "tasks": [
- "GraniteMoeSharedForCausalLM",
- "GraniteMoeSharedModel",
- "GraniteMoeSharedPreTrainedModel"
- ]
- }
- },
- "info.stst.granite-speech": {
- "*": {
- "repo": "ibm-granite/granite-speech-3.3-8b",
- "pkg": {
- "0": {
- "transformers": "GraniteSpeechForConditionalGeneration"
- }
- },
- "tasks": [
- "GraniteSpeechCTCEncoder",
- "GraniteSpeechForConditionalGeneration",
- "GraniteSpeechPreTrainedModel"
- ]
- }
- },
- "info.aet.layoutlm-uncased": {
- "*": {
- "repo": "microsoft/layoutlm-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMModel"
- }
- },
- "tasks": [
- "LayoutLMForMaskedLM",
- "LayoutLMForSequenceClassification",
- "LayoutLMForTokenClassification",
- "LayoutLMForQuestionAnswering",
- "LayoutLMModel",
- "LayoutLMPreTrainedModel"
- ]
- }
- },
- "info.vit.align": {
- "*": {
- "repo": "kakaobrain/align-base",
- "pkg": {
- "0": {
- "transformers": "AlignModel"
- }
- },
- "tasks": [
- "AlignPreTrainedModel",
- "AlignTextModel",
- "AlignVisionModel",
- "AlignModel"
- ]
- }
- },
- "info.vit.aya-vision": {
- "*": {
- "repo": "CohereForAI/aya-vision-8b",
- "pkg": {
- "0": {
- "transformers": "AyaVisionModel"
- }
- },
- "tasks": [
- "AyaVisionForConditionalGeneration",
- "AyaVisionPreTrainedModel",
- "AyaVisionModel"
- ]
- }
- },
- "info.vit.altclip": {
- "*": {
- "repo": "BAAI/AltCLIP",
- "pkg": {
- "0": {
- "transformers": "AltCLIPModel"
- }
- },
- "tasks": [
- "AltCLIPPreTrainedModel",
- "AltCLIPVisionModel",
- "AltCLIPTextModel",
- "AltCLIPModel"
- ]
- }
- },
- "info.ssm.falcon-mamba": {
- "*": {
- "repo": "tiiuae/falcon-mamba-7b",
- "pkg": {
- "0": {
- "transformers": "FalconMambaModel"
- }
- },
- "tasks": [
- "FalconMambaForCausalLM",
- "FalconMambaModel",
- "FalconMambaPreTrainedModel",
- "FalconMambaCache"
- ]
- }
- },
- "info.vit.retribert-uncased": {
- "*": {
- "repo": "yjernite/retribert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "RetriBertModel"
- }
- },
- "tasks": [
- "RetriBertModel",
- "RetriBertPreTrainedModel"
- ]
- }
- },
- "info.art.ctrl": {
- "*": {
- "repo": "Salesforce/ctrl",
- "pkg": {
- "0": {
- "transformers": "CTRLModel"
- }
- },
- "tasks": [
- "CTRLForSequenceClassification",
- "CTRLLMHeadModel",
- "CTRLModel",
- "CTRLPreTrainedModel"
- ]
- }
- },
- "info.art.evolla-hf": {
- "*": {
- "repo": "westlake-repl/Evolla-10B-hf",
- "pkg": {
- "0": {
- "transformers": "EvollaModel"
- }
- },
- "tasks": [
- "EvollaForProteinText2Text",
- "EvollaModel",
- "EvollaPreTrainedModel"
- ]
- }
- },
- "info.vit.cvt-13": {
- "*": {
- "repo": "microsoft/cvt-13",
- "pkg": {
- "0": {
- "transformers": "CvtModel"
- }
- },
- "tasks": [
- "CvtForImageClassification",
- "CvtModel",
- "CvtPreTrainedModel"
- ]
- }
- },
- "info.moe.dots-llm1": {
- "*": {
- "repo": "rednote-hilab/dots.llm1.base",
- "pkg": {
- "0": {
- "transformers": "Dots1Model"
- }
- },
- "tasks": [
- "Dots1PreTrainedModel",
- "Dots1Model",
- "Dots1ForCausalLM"
- ]
- }
- },
- "info.art.smollm3": {
- "*": {
- "repo": "HuggingFaceTB/SmolLM3-3B",
- "pkg": {
- "0": {
- "transformers": "SmolLM3Model"
- }
- },
- "tasks": [
- "SmolLM3PreTrainedModel",
- "SmolLM3Model",
- "SmolLM3ForCausalLM",
- "SmolLM3ForSequenceClassification",
- "SmolLM3ForTokenClassification",
- "SmolLM3ForQuestionAnswering"
- ]
- }
- },
- "info.vit.vit-mae": {
- "*": {
- "repo": "facebook/vit-mae-base",
- "pkg": {
- "0": {
- "transformers": "ViTMAEModel"
- }
- },
- "tasks": [
- "ViTMAEForPreTraining",
- "ViTMAELayer",
- "ViTMAEModel",
- "ViTMAEPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilevit": {
- "*": {
- "repo": "apple/mobilevit-small",
- "pkg": {
- "0": {
- "transformers": "MobileViTModel"
- }
- },
- "tasks": [
- "MobileViTForImageClassification",
- "MobileViTForSemanticSegmentation",
- "MobileViTModel",
- "MobileViTPreTrainedModel"
- ]
- }
- },
- "info.aet.roc-bert-zh": {
- "*": {
- "repo": "weiweishi/roc-bert-base-zh",
- "pkg": {
- "0": {
- "transformers": "RoCBertModel"
- }
- },
- "tasks": [
- "RoCBertForCausalLM",
- "RoCBertForMaskedLM",
- "RoCBertForMultipleChoice",
- "RoCBertForPreTraining",
- "RoCBertForQuestionAnswering",
- "RoCBertForSequenceClassification",
- "RoCBertForTokenClassification",
- "RoCBertLayer",
- "RoCBertModel",
- "RoCBertPreTrainedModel"
- ]
- }
- },
- "info.vit.layoutlmv3": {
- "*": {
- "repo": "microsoft/layoutlmv3-base",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv3Model"
- }
- },
- "tasks": [
- "LayoutLMv3ForQuestionAnswering",
- "LayoutLMv3ForSequenceClassification",
- "LayoutLMv3ForTokenClassification",
- "LayoutLMv3Model",
- "LayoutLMv3PreTrainedModel"
- ]
- }
- },
- "info.gan.nat-in-224": {
- "*": {
- "repo": "shi-labs/nat-mini-in1k-224",
- "pkg": {
- "0": {
- "transformers": "NatModel"
- }
- },
- "tasks": [
- "NatForImageClassification",
- "NatModel",
- "NatPreTrainedModel",
- "NatBackbone"
- ]
- }
- },
- "info.vit.mobilevitv2-1": {
- "*": {
- "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
- "pkg": {
- "0": {
- "transformers": "MobileViTV2Model"
- }
- },
- "tasks": [
- "MobileViTV2ForImageClassification",
- "MobileViTV2ForSemanticSegmentation",
- "MobileViTV2Model",
- "MobileViTV2PreTrainedModel"
- ]
- }
- },
- "info.art.openai-gpt": {
- "*": {
- "repo": "openai-community/openai-gpt",
- "pkg": {
- "0": {
- "transformers": "OpenAIGPTModel"
- }
- },
- "tasks": [
- "OpenAIGPTDoubleHeadsModel",
- "OpenAIGPTForSequenceClassification",
- "OpenAIGPTLMHeadModel",
- "OpenAIGPTModel",
- "OpenAIGPTPreTrainedModel"
- ]
- }
- },
- "info.vit.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine_x_coco",
- "pkg": {
- "0": {
- "transformers": "HGNetV2Backbone"
- }
- },
- "tasks": [
- "HGNetV2Backbone",
- "HGNetV2PreTrainedModel",
- "HGNetV2ForImageClassification"
- ]
- }
- },
- "info.art.m-ctc-t": {
- "*": {
- "repo": "speechbrain/m-ctc-t-large",
- "pkg": {
- "0": {
- "transformers": "MCTCTModel"
- }
- },
- "tasks": [
- "MCTCTForCTC",
- "MCTCTModel",
- "MCTCTPreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-1500h-cv": {
- "*": {
- "repo": "microsoft/unispeech-large-1500h-cv",
- "pkg": {
- "0": {
- "transformers": "UniSpeechModel"
- }
- },
- "tasks": [
- "UniSpeechForCTC",
- "UniSpeechForPreTraining",
- "UniSpeechForSequenceClassification",
- "UniSpeechModel",
- "UniSpeechPreTrainedModel"
- ]
- }
- },
- "info.vit.blip-flan-t5": {
- "*": {
- "repo": "Salesforce/instructblip-flan-t5-xl",
- "pkg": {
- "0": {
- "transformers": "InstructBlipModel"
- }
- },
- "tasks": [
- "InstructBlipQFormerModel",
- "InstructBlipPreTrainedModel",
- "InstructBlipModel",
- "InstructBlipForConditionalGeneration",
- "InstructBlipVisionModel"
- ]
- }
- },
- "info.vit.siglip-patch16-224": {
- "*": {
- "repo": "google/siglip-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "SiglipModel"
- }
- },
- "tasks": [
- "SiglipModel",
- "SiglipPreTrainedModel",
- "SiglipTextModel",
- "SiglipVisionModel",
- "SiglipForImageClassification"
- ]
- }
- },
- "info.detr.omdet-turbo-swin-hf": {
- "*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
- "pkg": {
- "0": {
- "transformers": "OmDetTurboForObjectDetection"
- }
- },
- "tasks": [
- "OmDetTurboForObjectDetection",
- "OmDetTurboPreTrainedModel"
- ]
- }
- },
- "info.art.musicgen-melody": {
- "*": {
- "repo": "facebook/musicgen-melody",
- "pkg": {
- "0": {
- "transformers": "MusicgenMelodyModel"
- }
- },
- "tasks": [
- "MusicgenMelodyForConditionalGeneration",
- "MusicgenMelodyForCausalLM",
- "MusicgenMelodyModel",
- "MusicgenMelodyPreTrainedModel"
- ]
- }
- },
- "info.aet.funnel": {
- "*": {
- "repo": "funnel-transformer/small",
- "pkg": {
- "0": {
- "transformers": "FunnelModel"
- }
- },
- "tasks": [
- "FunnelBaseModel",
- "FunnelForMaskedLM",
- "FunnelForMultipleChoice",
- "FunnelForPreTraining",
- "FunnelForQuestionAnswering",
- "FunnelForSequenceClassification",
- "FunnelForTokenClassification",
- "FunnelModel",
- "FunnelPreTrainedModel"
- ]
- }
- },
- "info.vit.convnextv2-224": {
- "*": {
- "repo": "facebook/convnextv2-tiny-1k-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextV2Model"
- }
- },
- "tasks": [
- "ConvNextV2ForImageClassification",
- "ConvNextV2Model",
- "ConvNextV2PreTrainedModel",
- "ConvNextV2Backbone"
- ]
- }
- },
- "info.moe.glm-4-a": {
- "*": {
- "repo": "zai-org/GLM-4.5-Air",
- "pkg": {
- "0": {
- "transformers": "Glm4MoeModel"
- }
- },
- "tasks": [
- "Glm4MoePreTrainedModel",
- "Glm4MoeModel",
- "Glm4MoeForCausalLM"
- ]
- }
- },
- "info.detr.vit--384": {
- "*": {
- "repo": "google/vit-hybrid-base-bit-384",
- "pkg": {
- "0": {
- "transformers": "ViTHybridModel"
- }
- },
- "tasks": [
- "ViTHybridForImageClassification",
- "ViTHybridModel",
- "ViTHybridPreTrainedModel"
- ]
- }
- },
- "info.art.qwen2": {
- "*": {
- "repo": "Qwen/Qwen2-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2Model"
- }
- },
- "tasks": [
- "Qwen2PreTrainedModel",
- "Qwen2Model",
- "Qwen2ForCausalLM",
- "Qwen2ForSequenceClassification",
- "Qwen2ForTokenClassification",
- "Qwen2ForQuestionAnswering"
- ]
- },
- "bagel-mot": {
- "repo": "ByteDance-Seed/BAGEL-7B-MoT",
- "pkg": {
- "0": {
- "Bagel": "app"
- }
- }
- }
- },
- "info.vit.aimv2-patch14-224-lit": {
- "*": {
- "repo": "apple/aimv2-large-patch14-224-lit",
- "pkg": {
- "0": {
- "transformers": "Aimv2Model"
- }
- },
- "tasks": [
- "Aimv2VisionModel",
- "Aimv2Model",
- "Aimv2PreTrainedModel",
- "Aimv2TextModel"
- ]
- }
- },
- "info.aet.data2vec-text": {
- "*": {
- "repo": "facebook/data2vec-text-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecTextModel"
- }
- },
- "tasks": [
- "Data2VecTextForCausalLM",
- "Data2VecTextForMaskedLM",
- "Data2VecTextForMultipleChoice",
- "Data2VecTextForQuestionAnswering",
- "Data2VecTextForSequenceClassification",
- "Data2VecTextForTokenClassification",
- "Data2VecTextModel",
- "Data2VecTextPreTrainedModel"
- ]
- }
- },
- "info.detr.conditional-detr-resnet-50": {
- "*": {
- "repo": "microsoft/conditional-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "ConditionalDetrModel"
- }
- },
- "tasks": [
- "ConditionalDetrForObjectDetection",
- "ConditionalDetrForSegmentation",
- "ConditionalDetrModel",
- "ConditionalDetrPreTrainedModel"
- ]
- }
- },
- "info.aet.efficient-mlm-m0-0": {
- "*": {
- "repo": "andreasmadsen/efficient_mlm_m0.40",
- "pkg": {
- "0": {
- "transformers": "RobertaPreLayerNormModel"
- }
- },
- "tasks": [
- "RobertaPreLayerNormForCausalLM",
- "RobertaPreLayerNormForMaskedLM",
- "RobertaPreLayerNormForMultipleChoice",
- "RobertaPreLayerNormForQuestionAnswering",
- "RobertaPreLayerNormForSequenceClassification",
- "RobertaPreLayerNormForTokenClassification",
- "RobertaPreLayerNormModel",
- "RobertaPreLayerNormPreTrainedModel"
- ]
- }
- },
- "info.art.gemma2": {
- "*": {
- "repo": "google/gemma-2-9b",
- "pkg": {
- "0": {
- "transformers": "Gemma2Model"
- }
- },
- "file_256": [
- "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
- "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
- ],
- "layer_b3": [
- "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
- "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
- ],
- "layer_256": [
- "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
- "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
- ],
- "tasks": [
- "Gemma2ForCausalLM",
- "Gemma2Model",
- "Gemma2PreTrainedModel",
- "Gemma2ForSequenceClassification",
- "Gemma2ForTokenClassification"
- ]
- }
- },
- "info.aet.yoso-4096": {
- "*": {
- "repo": "uw-madison/yoso-4096",
- "pkg": {
- "0": {
- "transformers": "YosoModel"
- }
- },
- "tasks": [
- "YosoForMaskedLM",
- "YosoForMultipleChoice",
- "YosoForQuestionAnswering",
- "YosoForSequenceClassification",
- "YosoForTokenClassification",
- "YosoLayer",
- "YosoModel",
- "YosoPreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-sat-100h-libri-ft": {
- "*": {
- "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
- "pkg": {
- "0": {
- "transformers": "UniSpeechSatModel"
- }
- },
- "tasks": [
- "UniSpeechSatForAudioFrameClassification",
- "UniSpeechSatForCTC",
- "UniSpeechSatForPreTraining",
- "UniSpeechSatForSequenceClassification",
- "UniSpeechSatForXVector",
- "UniSpeechSatModel",
- "UniSpeechSatPreTrainedModel"
- ]
- }
- },
- "info.vit.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4ForConditionalGeneration"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.art.olmo-hf": {
- "*": {
- "repo": "allenai/OLMo-7B-hf",
- "pkg": {
- "0": {
- "transformers": "OlmoModel"
- }
- },
- "tasks": [
- "OlmoForCausalLM",
- "OlmoModel",
- "OlmoPreTrainedModel"
- ]
- }
- },
- "info.aet.sew": {
- "*": {
- "repo": "asapp/sew-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWModel"
- }
- },
- "tasks": [
- "SEWForCTC",
- "SEWForSequenceClassification",
- "SEWModel",
- "SEWPreTrainedModel"
- ]
- }
- },
- "info.stst.plbart": {
- "*": {
- "repo": "uclanlp/plbart-base",
- "pkg": {
- "0": {
- "transformers": "PLBartModel"
- }
- },
- "tasks": [
- "PLBartForCausalLM",
- "PLBartForConditionalGeneration",
- "PLBartForSequenceClassification",
- "PLBartModel",
- "PLBartPreTrainedModel"
- ]
- }
- },
- "info.detr.deformable-detr": {
- "*": {
- "repo": "SenseTime/deformable-detr",
- "pkg": {
- "0": {
- "transformers": "DetaModel"
- }
- },
- "tasks": [
- "DetaForObjectDetection",
- "DetaModel",
- "DetaPreTrainedModel"
- ]
- }
- },
- "info.moe.mixtral-8x": {
- "*": {
- "repo": "mistralai/Mixtral-8x7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MixtralModel"
- }
- },
- "tasks": [
- "MixtralForCausalLM",
- "MixtralForQuestionAnswering",
- "MixtralModel",
- "MixtralPreTrainedModel",
- "MixtralForSequenceClassification",
- "MixtralForTokenClassification"
- ]
- }
- },
- "info.gan.mimi": {
- "*": {
- "repo": "kyutai/mimi",
- "pkg": {
- "0": {
- "transformers": "MimiModel"
- }
- },
- "tasks": [
- "MimiModel",
- "MimiPreTrainedModel"
- ]
- }
- },
- "info.art.bitnet-b18-4t": {
- "*": {
- "repo": "microsoft/bitnet-b1.58-2B-4T",
- "pkg": {
- "0": {
- "transformers": "BitNetModel"
- }
- },
- "tasks": [
- "BitNetForCausalLM",
- "BitNetModel",
- "BitNetPreTrainedModel"
- ]
- }
- },
- "info.art.bigbird-roberta": {
- "*": {
- "repo": "google/bigbird-roberta-base",
- "pkg": {
- "0": {
- "transformers": "BigBirdModel"
- }
- },
- "tasks": [
- "BigBirdForCausalLM",
- "BigBirdForMaskedLM",
- "BigBirdForMultipleChoice",
- "BigBirdForPreTraining",
- "BigBirdForQuestionAnswering",
- "BigBirdForSequenceClassification",
- "BigBirdForTokenClassification",
- "BigBirdLayer",
- "BigBirdModel",
- "BigBirdPreTrainedModel"
- ]
- }
- },
- "info.art.lxmert-uncased": {
- "*": {
- "repo": "unc-nlp/lxmert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LxmertModel"
- }
- },
- "tasks": [
- "LxmertEncoder",
- "LxmertForPreTraining",
- "LxmertForQuestionAnswering",
- "LxmertModel",
- "LxmertPreTrainedModel",
- "LxmertVisualFeatureEncoder",
- "LxmertXLayer"
- ]
- }
- },
- "info.art.gpt-neox-japanese": {
- "*": {
- "repo": "abeja/gpt-neox-japanese-2.7b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXJapaneseModel"
- }
- },
- "tasks": [
- "GPTNeoXJapaneseForCausalLM",
- "GPTNeoXJapaneseLayer",
- "GPTNeoXJapaneseModel",
- "GPTNeoXJapanesePreTrainedModel"
- ]
- }
- },
- "info.stst.m": {
- "*": {
- "repo": "facebook/m2m100_418M",
- "pkg": {
- "0": {
- "transformers": "M2M100Model"
- }
- },
- "tasks": [
- "M2M100ForConditionalGeneration",
- "M2M100Model",
- "M2M100PreTrainedModel"
- ]
- }
- },
- "info.art.chameleon": {
- "*": {
- "repo": "meta/chameleon-7B",
- "pkg": {
- "0": {
- "transformers": "ChameleonModel"
- }
- },
- "tasks": [
- "ChameleonForConditionalGeneration",
- "ChameleonModel",
- "ChameleonPreTrainedModel",
- "ChameleonVQVAE"
- ]
- },
- "lumina-mgpt-1024": {
- "repo": "Alpha-VLLM/Lumina-mGPT-7B-1024",
- "pkg": {
- "0": {
- "inference_solver": {
- "FlexARInferenceSolver": {
- "precision": "bf16",
- "target_size": 768
- }
- },
- "generation": {
- "images": [],
- "qas": [
- [
- "q1",
- null
- ]
- ],
- "max_gen_len": 8192,
- "temperature": 1.0
- }
- },
- "1": {
- "inference_solver": "ChameleonXLLMXForConditionalGeneration"
- }
- },
- "file_256": [
- "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
- "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec",
- "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5",
- "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b"
- ],
- "layer_256": [
- "eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"
- ],
- "layer_b3": [
- "6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"
- ]
- }
- },
- "info.aet.albert-xx-v2": {
- "*": {
- "repo": "albert/albert-xxlarge-v2",
- "pkg": {
- "0": {
- "transformers": "AlbertModel"
- }
- },
- "tasks": [
- "AlbertPreTrainedModel",
- "AlbertModel",
- "AlbertForPreTraining",
- "AlbertForMaskedLM",
- "AlbertForSequenceClassification",
- "AlbertForTokenClassification",
- "AlbertForQuestionAnswering",
- "AlbertForMultipleChoice"
- ]
- }
- },
- "info.stst.pegasus": {
- "*": {
- "repo": "google/pegasus-large",
- "pkg": {
- "0": {
- "transformers": "PegasusModel"
- }
- },
- "tasks": [
- "PegasusForCausalLM",
- "PegasusForConditionalGeneration",
- "PegasusModel",
- "PegasusPreTrainedModel"
- ]
- }
- },
- "info.aet.ernie-3-zh": {
- "*": {
- "repo": "nghuyong/ernie-3.0-base-zh",
- "pkg": {
- "0": {
- "transformers": "ErnieModel"
- }
- },
- "tasks": [
- "ErnieForCausalLM",
- "ErnieForMaskedLM",
- "ErnieForMultipleChoice",
- "ErnieForNextSentencePrediction",
- "ErnieForPreTraining",
- "ErnieForQuestionAnswering",
- "ErnieForSequenceClassification",
- "ErnieForTokenClassification",
- "ErnieModel",
- "ErniePreTrainedModel"
- ]
- }
- },
- "info.vit.clvp-dev": {
- "*": {
- "repo": "susnato/clvp_dev",
- "pkg": {
- "0": {
- "transformers": "ClvpModelForConditionalGeneration"
- }
- },
- "tasks": [
- "ClvpModelForConditionalGeneration",
- "ClvpForCausalLM",
- "ClvpModel",
- "ClvpPreTrainedModel",
- "ClvpEncoder",
- "ClvpDecoder"
- ]
- }
- },
- "info.vit.smolvlm": {
- "*": {
- "repo": "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
- "pkg": {
- "0": {
- "transformers": "SmolVLMModel"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.aet.esm": {
- "*": {
- "repo": "facebook/esm-1b",
- "pkg": {
- "0": {
- "transformers": "EsmModel"
- }
- },
- "tasks": [
- "EsmForMaskedLM",
- "EsmForSequenceClassification",
- "EsmForTokenClassification",
- "EsmModel",
- "EsmPreTrainedModel"
- ]
- }
- },
- "info.vit.tvlt": {
- "*": {
- "repo": "ZinengTang/tvlt-base",
- "pkg": {
- "0": {
- "transformers": "TvltModel"
- }
- },
- "tasks": [
- "TvltModel",
- "TvltForPreTraining",
- "TvltForAudioVisualClassification",
- "TvltPreTrainedModel"
- ]
- }
- },
- "info.moe.gpt-oss": {
- "*": {
- "repo": "openai/gpt-oss-120b",
- "pkg": {
- "0": {
- "transformers": "GptOssModel"
- }
- },
- "file_256": [
- "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
- "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
- ],
- "layer_b3": [
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
- "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
- ],
- "layer_256": [
- "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
- "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
- ],
- "tasks": [
- "GptOssForCausalLM",
- "GptOssModel",
- "GptOssPreTrainedModel"
- ]
- }
- },
- "info.art.rembert": {
- "*": {
- "repo": "google/rembert",
- "pkg": {
- "0": {
- "transformers": "RemBertModel"
- }
- },
- "tasks": [
- "RemBertForCausalLM",
- "RemBertForMaskedLM",
- "RemBertForMultipleChoice",
- "RemBertForQuestionAnswering",
- "RemBertForSequenceClassification",
- "RemBertForTokenClassification",
- "RemBertLayer",
- "RemBertModel",
- "RemBertPreTrainedModel"
- ]
- }
- },
- "info.art.c4ai-command-r-v01": {
- "*": {
- "repo": "CohereForAI/c4ai-command-r-v01",
- "pkg": {
- "0": {
- "transformers": "CohereModel"
- }
- },
- "tasks": [
- "CohereForCausalLM",
- "CohereModel",
- "CoherePreTrainedModel"
- ]
- }
- },
- "info.rnn.rwkv-4-pile": {
- "*": {
- "repo": "RWKV/rwkv-4-169m-pile",
- "pkg": {
- "0": {
- "transformers": "RwkvModel"
- }
- },
- "tasks": [
- "RwkvForCausalLM",
- "RwkvModel",
- "RwkvPreTrainedModel"
- ]
- }
- },
- "info.stst.umt5": {
- "*": {
- "repo": "google/umt5-small",
- "pkg": {
- "0": {
- "transformers": "UMT5Model"
- }
- },
- "identifiers": [
- "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
- ],
- "file_256": [
- "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
- "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
- "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
- "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
- "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
- "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
- "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
- "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
- "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
- "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
- "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
- "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
- "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
- "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
- "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
- "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
- ],
- "layer_b3": [
- "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
- "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
- "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
- "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
- "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
- "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
- "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
- "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
- "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
- "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
- "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
- "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
- "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
- "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
- "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
- "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
- "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
- "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
- ],
- "layer_256": [
- "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
- "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
- "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
- "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
- "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
- "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
- "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
- "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
- "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
- "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
- "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
- "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
- "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
- "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
- "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
- "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
- "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
- "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
- ],
- "tasks": [
- "UMT5EncoderModel",
- "UMT5ForConditionalGeneration",
- "UMT5ForQuestionAnswering",
- "UMT5ForSequenceClassification",
- "UMT5ForTokenClassification",
- "UMT5Model",
- "UMT5PreTrainedModel"
- ]
- }
- },
- "info.art.gemma": {
- "*": {
- "repo": "google/gemma-7b",
- "pkg": {
- "0": {
- "transformers": "GemmaModel"
- }
- },
- "file_256": [
- "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
- ],
- "layer_b3": [
- "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
- "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
- ],
- "layer_256": [
- "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
- "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
- ],
- "tasks": [
- "GemmaModel",
- "GemmaForCausalLM",
- "GemmaForSequenceClassification",
- "GemmaForTokenClassification",
- "GemmaPreTrainedModel"
- ]
- }
- },
- "info.vit.pvt-v2-b0": {
- "*": {
- "repo": "OpenGVLab/pvt_v2_b0",
- "pkg": {
- "0": {
- "transformers": "PvtV2Model"
- }
- },
- "tasks": [
- "PvtV2ForImageClassification",
- "PvtV2Model",
- "PvtV2PreTrainedModel",
- "PvtV2Backbone"
- ]
- }
- },
- "info.aet.test-dec": {
- "*": {
- "repo": "blab-jhu/test-32m-dec",
- "pkg": {
- "0": {
- "transformers": "ModernBertDecoderModel"
- }
- },
- "tasks": [
- "ModernBertDecoderModel",
- "ModernBertDecoderPreTrainedModel",
- "ModernBertDecoderForCausalLM",
- "ModernBertDecoderForSequenceClassification"
- ]
- }
- },
- "info.vit.deepseek-vl-chat": {
- "*": {
- "repo": "deepseek-community/deepseek-vl-1.3b-chat",
- "pkg": {
- "0": {
- "transformers": "DeepseekVLModel"
- }
- },
- "tasks": [
- "DeepseekVLPreTrainedModel",
- "DeepseekVLModel",
- "DeepseekVLForConditionalGeneration"
- ]
- }
- },
- "info.aet.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralEncoder"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.vit.command-a-vision-07-2025": {
- "*": {
- "repo": "CohereLabs/command-a-vision-07-2025",
- "pkg": {
- "0": {
- "transformers": "Cohere2VisionModel"
- }
- },
- "tasks": [
- "Cohere2VisionForConditionalGeneration",
- "Cohere2VisionPreTrainedModel",
- "Cohere2VisionModel"
- ]
- }
- },
- "info.art.reformer-crime-and-punishment": {
- "*": {
- "repo": "google/reformer-crime-and-punishment",
- "pkg": {
- "0": {
- "transformers": "ReformerModel"
- }
- },
- "tasks": [
- "ReformerAttention",
- "ReformerForMaskedLM",
- "ReformerForQuestionAnswering",
- "ReformerForSequenceClassification",
- "ReformerLayer",
- "ReformerModel",
- "ReformerModelWithLMHead",
- "ReformerPreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-960h": {
- "*": {
- "repo": "facebook/wav2vec2-base-960h",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2Model"
- }
- },
- "tasks": [
- "Wav2Vec2ForAudioFrameClassification",
- "Wav2Vec2ForCTC",
- "Wav2Vec2ForMaskedLM",
- "Wav2Vec2ForPreTraining",
- "Wav2Vec2ForSequenceClassification",
- "Wav2Vec2ForXVector",
- "Wav2Vec2Model",
- "Wav2Vec2PreTrainedModel"
- ]
- }
- },
- "info.detr.resnet18-a1-in": {
- "*": {
- "repo": "timm/resnet18.a1_in1k",
- "pkg": {
- "0": {
- "transformers": "TimmWrapperModel"
- }
- },
- "tasks": [
- "TimmWrapperPreTrainedModel",
- "TimmWrapperModel",
- "TimmWrapperForImageClassification"
- ]
- }
- },
- "info.vit.videomae": {
- "*": {
- "repo": "MCG-NJU/videomae-base",
- "pkg": {
- "0": {
- "transformers": "VideoMAEModel"
- }
- },
- "tasks": [
- "VideoMAEForPreTraining",
- "VideoMAEModel",
- "VideoMAEPreTrainedModel",
- "VideoMAEForVideoClassification"
- ]
- }
- },
- "info.art.nezha-cn": {
- "*": {
- "repo": "sijunhe/nezha-cn-base",
- "pkg": {
- "0": {
- "transformers": "NezhaModel"
- }
- },
- "tasks": [
- "NezhaForNextSentencePrediction",
- "NezhaForMaskedLM",
- "NezhaForPreTraining",
- "NezhaForMultipleChoice",
- "NezhaForQuestionAnswering",
- "NezhaForSequenceClassification",
- "NezhaForTokenClassification",
- "NezhaModel",
- "NezhaPreTrainedModel"
- ]
- }
- },
- "info.vit.git": {
- "*": {
- "repo": "microsoft/git-base",
- "pkg": {
- "0": {
- "transformers": "GitModel"
- }
- },
- "tasks": [
- "GitForCausalLM",
- "GitModel",
- "GitPreTrainedModel",
- "GitVisionModel"
- ]
- }
- },
- "info.vit.kosmos-2-patch14-224": {
- "*": {
- "repo": "microsoft/kosmos-2-patch14-224",
- "pkg": {
- "0": {
- "transformers": "Kosmos2Model"
- }
- },
- "tasks": [
- "Kosmos2ForConditionalGeneration",
- "Kosmos2Model",
- "Kosmos2PreTrainedModel"
- ]
- }
- },
- "info.art.stt-en-trfs": {
- "*": {
- "repo": "kyutai/stt-2.6b-en-trfs",
- "pkg": {
- "0": {
- "transformers": "KyutaiSpeechToTextModel"
- }
- },
- "tasks": [
- "KyutaiSpeechToTextPreTrainedModel",
- "KyutaiSpeechToTextModel",
- "KyutaiSpeechToTextForConditionalGeneration"
- ]
- }
- },
- "info.art.csm": {
- "*": {
- "repo": "sesame/csm-1b",
- "pkg": {
- "0": {
- "transformers": "CsmForConditionalGeneration"
- }
- },
- "tasks": [
- "CsmPreTrainedModel",
- "CsmBackboneModel",
- "CsmDepthDecoderModel",
- "CsmDepthDecoderForCausalLM",
- "CsmForConditionalGeneration"
- ]
- }
- },
- "info.detr.dab-detr": {
- "*": {
- "repo": "IDEA-Research/dab-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DabDetrModel"
- }
- },
- "tasks": [
- "DabDetrForObjectDetection",
- "DabDetrModel",
- "DabDetrPreTrainedModel"
- ]
- }
- },
- "info.vit.timesformer-finetuned-k600": {
- "*": {
- "repo": "facebook/timesformer-base-finetuned-k600",
- "pkg": {
- "0": {
- "transformers": "TimesformerModel"
- }
- },
- "tasks": [
- "TimesformerModel",
- "TimesformerForVideoClassification",
- "TimesformerPreTrainedModel"
- ]
- }
- },
- "info.ssm.falconh1-t-hf": {
- "*": {
- "repo": "tiiuae/Falcon-H1-34B-Instruct",
- "pkg": {
- "0": {
- "transformers": "FalconH1Model"
- }
- },
- "tasks": [
- "FalconH1Model",
- "FalconH1ForCausalLM",
- "FalconH1PreTrainedModel"
- ]
- }
- },
- "info.moe.olmoe-0924": {
- "*": {
- "repo": "allenai/OLMoE-1B-7B-0924",
- "pkg": {
- "0": {
- "transformers": "OlmoeModel"
- }
- },
- "tasks": [
- "OlmoeForCausalLM",
- "OlmoeModel",
- "OlmoePreTrainedModel"
- ]
- }
- },
- "info.moe.deepseek-v2-lite": {
- "*": {
- "repo": "deepseek-ai/DeepSeek-V2-Lite",
- "pkg": {
- "0": {
- "transformers": "DeepseekV2Model"
- }
- },
- "tasks": [
- "DeepseekV2PreTrainedModel",
- "DeepseekV2Model",
- "DeepseekV2ForCausalLM",
- "DeepseekV2ForSequenceClassification"
- ]
- }
- },
- "info.art.phi-1": {
- "*": {
- "repo": "microsoft/phi-1",
- "pkg": {
- "0": {
- "transformers": "PhiModel"
- }
- },
- "tasks": [
- "PhiPreTrainedModel",
- "PhiModel",
- "PhiForCausalLM",
- "PhiForSequenceClassification",
- "PhiForTokenClassification"
- ]
- }
- },
- "info.vit.swin-patch4-window7-224": {
- "*": {
- "repo": "microsoft/swin-tiny-patch4-window7-224",
- "pkg": {
- "0": {
- "transformers": "MaskFormerSwinModel"
- }
- },
- "tasks": [
- "MaskFormerSwinBackbone",
- "MaskFormerSwinModel",
- "MaskFormerSwinPreTrainedModel"
- ]
- }
- },
- "info.art.codegen-mono": {
- "*": {
- "repo": "Salesforce/codegen-2B-mono",
- "pkg": {
- "0": {
- "transformers": "CodeGenModel"
- }
- },
- "tasks": [
- "CodeGenForCausalLM",
- "CodeGenModel",
- "CodeGenPreTrainedModel"
- ]
- }
- },
- "info.aet.data2vec-audio-960h": {
- "*": {
- "repo": "facebook/data2vec-audio-base-960h",
- "pkg": {
- "0": {
- "transformers": "Data2VecAudioModel"
- }
- },
- "tasks": [
- "Data2VecAudioForAudioFrameClassification",
- "Data2VecAudioForCTC",
- "Data2VecAudioForSequenceClassification",
- "Data2VecAudioForXVector",
- "Data2VecAudioModel",
- "Data2VecAudioPreTrainedModel"
- ]
- }
- },
- "info.detr.upernet-convnext": {
- "*": {
- "repo": "openmmlab/upernet-convnext-tiny",
- "pkg": {
- "0": {
- "transformers": "UperNetForSemanticSegmentation"
- }
- },
- "tasks": [
- "UperNetForSemanticSegmentation",
- "UperNetPreTrainedModel"
- ]
- }
- },
- "info.art.musicgen": {
- "*": {
- "repo": "facebook/musicgen-small",
- "pkg": {
- "0": {
- "transformers": "MusicgenModel"
- }
- },
- "tasks": [
- "MusicgenForConditionalGeneration",
- "MusicgenForCausalLM",
- "MusicgenModel",
- "MusicgenPreTrainedModel"
- ]
- }
- },
- "info.art.olmo2-1124-hf": {
- "*": {
- "repo": "allenai/Olmo-2-1124-7B",
- "pkg": {
- "0": {
- "transformers": "Olmo2Model"
- }
- },
- "tasks": [
- "Olmo2ForCausalLM",
- "Olmo2Model",
- "Olmo2PreTrainedModel"
- ]
- }
- },
- "info.art.splinter": {
- "*": {
- "repo": "tau/splinter-base",
- "pkg": {
- "0": {
- "transformers": "SplinterModel"
- }
- },
- "tasks": [
- "SplinterForQuestionAnswering",
- "SplinterForPreTraining",
- "SplinterLayer",
- "SplinterModel",
- "SplinterPreTrainedModel"
- ]
- }
- },
- "info.art.luke": {
- "*": {
- "repo": "studio-ousia/luke-base",
- "pkg": {
- "0": {
- "transformers": "LukeModel"
- }
- },
- "tasks": [
- "LukeForEntityClassification",
- "LukeForEntityPairClassification",
- "LukeForEntitySpanClassification",
- "LukeForMultipleChoice",
- "LukeForQuestionAnswering",
- "LukeForSequenceClassification",
- "LukeForTokenClassification",
- "LukeForMaskedLM",
- "LukeModel",
- "LukePreTrainedModel"
- ]
- }
- },
- "info.art.glm-4-chat": {
- "*": {
- "repo": "zai-org/glm-4-9b-chat",
- "pkg": {
- "0": {
- "transformers": "GlmModel"
- }
- },
- "tasks": [
- "GlmPreTrainedModel",
- "GlmModel",
- "GlmForCausalLM",
- "GlmForSequenceClassification",
- "GlmForTokenClassification"
- ]
- }
- },
- "info.vit.vit-msn": {
- "*": {
- "repo": "facebook/vit-msn-base",
- "pkg": {
- "0": {
- "transformers": "ViTMSNModel"
- }
- },
- "tasks": [
- "ViTMSNModel",
- "ViTMSNForImageClassification",
- "ViTMSNPreTrainedModel"
- ]
- }
- },
- "info.cnn.yolos": {
- "*": {
- "repo": "hustvl/yolos-base",
- "pkg": {
- "0": {
- "transformers": "YolosModel"
- }
- },
- "tasks": [
- "YolosForObjectDetection",
- "YolosModel",
- "YolosPreTrainedModel"
- ]
- }
- },
- "info.vit.vitpose-simple": {
- "*": {
- "repo": "usyd-community/vitpose-base-simple",
- "pkg": {
- "0": {
- "transformers": "VitPoseBackbone"
- }
- },
- "tasks": [
- "VitPoseBackbonePreTrainedModel",
- "VitPoseBackbone"
- ]
- }
- },
- "info.aet.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2QFormerModel"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.stst.long-t5-local": {
- "*": {
- "repo": "google/long-t5-local-base",
- "pkg": {
- "0": {
- "transformers": "LongT5Model"
- }
- },
- "tasks": [
- "LongT5EncoderModel",
- "LongT5ForConditionalGeneration",
- "LongT5Model",
- "LongT5PreTrainedModel"
- ]
- }
- },
- "info.art.nemotron-3-hf": {
- "*": {
- "repo": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
- "pkg": {
- "0": {
- "transformers": "NemotronModel"
- }
- },
- "tasks": [
- "NemotronForQuestionAnswering",
- "NemotronForCausalLM",
- "NemotronModel",
- "NemotronPreTrainedModel",
- "NemotronForSequenceClassification",
- "NemotronForTokenClassification"
- ]
- }
- },
- "info.aet.wav2vec2-bert-rel-pos": {
- "*": {
- "repo": "facebook/w2v-bert-2.0",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2BertModel"
- }
- },
- "tasks": [
- "Wav2Vec2BertForAudioFrameClassification",
- "Wav2Vec2BertForCTC",
- "Wav2Vec2BertForSequenceClassification",
- "Wav2Vec2BertForXVector",
- "Wav2Vec2BertModel",
- "Wav2Vec2BertPreTrainedModel"
- ]
- }
- },
- "info.art.trocr-handwritten": {
- "*": {
- "repo": "microsoft/trocr-base-handwritten",
- "pkg": {
- "0": {
- "transformers": "TrOCRForCausalLM"
- }
- },
- "tasks": [
- "TrOCRForCausalLM",
- "TrOCRPreTrainedModel"
- ]
- }
- },
- "info.art.llama-2-hf": {
- "*": {
- "repo": "meta-llama/Llama-2-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlamaModel"
- }
- },
- "tasks": [
- "LlamaForCausalLM",
- "LlamaModel",
- "LlamaPreTrainedModel",
- "LlamaForSequenceClassification",
- "LlamaForQuestionAnswering",
- "LlamaForTokenClassification"
- ]
- }
- },
- "info.rnn.recurrentgemma": {
- "*": {
- "repo": "google/recurrentgemma-2b",
- "pkg": {
- "0": {
- "transformers": "RecurrentGemmaModel"
- }
- },
- "tasks": [
- "RecurrentGemmaForCausalLM",
- "RecurrentGemmaModel",
- "RecurrentGemmaPreTrainedModel"
- ]
- }
- },
- "info.stst.led-16384": {
- "*": {
- "repo": "allenai/led-base-16384",
- "pkg": {
- "0": {
- "transformers": "LEDModel"
- }
- },
- "tasks": [
- "LEDForConditionalGeneration",
- "LEDForQuestionAnswering",
- "LEDForSequenceClassification",
- "LEDModel",
- "LEDPreTrainedModel"
- ]
- }
- },
- "info.detr.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine-xlarge-coco",
- "pkg": {
- "0": {
- "transformers": "DFineModel"
- }
- },
- "tasks": [
- "DFineModel",
- "DFinePreTrainedModel",
- "DFineForObjectDetection"
- ]
- }
- },
- "info.vit.vjepa2-vitl-fpc64-256": {
- "*": {
- "repo": "facebook/vjepa2-vitl-fpc64-256",
- "pkg": {
- "0": {
- "transformers": "VJEPA2Model"
- }
- },
- "tasks": [
- "VJEPA2Model",
- "VJEPA2PreTrainedModel",
- "VJEPA2ForVideoClassification"
- ]
- }
- },
- "info.aet.lightglue-superpoint": {
- "*": {
- "repo": "ETH-CVG/lightglue_superpoint",
- "pkg": {
- "0": {
- "transformers": "LightGlueForKeypointMatching"
- }
- },
- "tasks": [
- "LightGluePreTrainedModel",
- "LightGlueForKeypointMatching"
- ]
- }
- },
- "info.ssm.mamba": {
- "*": {
- "repo": "state-spaces/mamba-2.8b",
- "pkg": {
- "0": {
- "transformers": "MambaModel"
- }
- },
- "tasks": [
- "MambaForCausalLM",
- "MambaModel",
- "MambaPreTrainedModel",
- "MambaCache"
- ]
- }
- },
- "info.vit.idefics2": {
- "*": {
- "repo": "HuggingFaceM4/idefics2-8b",
- "pkg": {
- "0": {
- "transformers": "Idefics2Model"
- }
- },
- "tasks": [
- "Idefics2ForConditionalGeneration",
- "Idefics2PreTrainedModel",
- "Idefics2Model"
- ]
- }
- },
- "info.art.squeezebert-uncased": {
- "*": {
- "repo": "squeezebert/squeezebert-uncased",
- "pkg": {
- "0": {
- "transformers": "SqueezeBertModel"
- }
- },
- "tasks": [
- "SqueezeBertForMaskedLM",
- "SqueezeBertForMultipleChoice",
- "SqueezeBertForQuestionAnswering",
- "SqueezeBertForSequenceClassification",
- "SqueezeBertForTokenClassification",
- "SqueezeBertModel",
- "SqueezeBertModule",
- "SqueezeBertPreTrainedModel"
- ]
- }
- },
- "info.art.mms-tts-eng": {
- "*": {
- "repo": "facebook/mms-tts-eng",
- "pkg": {
- "0": {
- "transformers": "VitsModel"
- }
- },
- "tasks": [
- "VitsModel",
- "VitsPreTrainedModel"
- ]
- }
- },
- "info.aet.fastspeech2-conformer": {
- "*": {
- "repo": "espnet/fastspeech2_conformer",
- "pkg": {
- "0": {
- "transformers": "FastSpeech2ConformerModel"
- }
- },
- "tasks": [
- "FastSpeech2ConformerWithHifiGan",
- "FastSpeech2ConformerHifiGan",
- "FastSpeech2ConformerModel",
- "FastSpeech2ConformerPreTrainedModel"
- ]
- }
- },
- "info.vit.textnet": {
- "*": {
- "repo": "czczup/textnet-base",
- "pkg": {
- "0": {
- "transformers": "TextNetModel"
- }
- },
- "tasks": [
- "TextNetBackbone",
- "TextNetModel",
- "TextNetPreTrainedModel",
- "TextNetForImageClassification"
- ]
- }
- },
- "info.vit.depth": {
- "*": {
- "repo": "apple/DepthPro",
- "pkg": {
- "0": {
- "transformers": "DepthProModel"
- }
- },
- "tasks": [
- "DepthProPreTrainedModel",
- "DepthProModel",
- "DepthProForDepthEstimation"
- ]
- }
- },
- "info.stst.bigbird-pegasus-arxiv": {
- "*": {
- "repo": "google/bigbird-pegasus-large-arxiv",
- "pkg": {
- "0": {
- "transformers": "BigBirdPegasusModel"
- }
- },
- "tasks": [
- "BigBirdPegasusForCausalLM",
- "BigBirdPegasusForConditionalGeneration",
- "BigBirdPegasusForQuestionAnswering",
- "BigBirdPegasusForSequenceClassification",
- "BigBirdPegasusModel",
- "BigBirdPegasusPreTrainedModel"
- ]
- }
- },
- "info.stst.mt5": {
- "*": {
- "repo": "google/mt5-small",
- "pkg": {
- "0": {
- "transformers": "MT5Model"
- }
- },
- "identifiers": [
- [
- 250112,
- 2048
- ],
- "text_encoders.mt5xl.transformer.shared.weight"
- ],
- "file_256": [
- "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
- "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
- ],
- "layer_b3": [
- "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51"
- ],
- "layer_256": [
- "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
- "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
- ],
- "tasks": [
- "MT5EncoderModel",
- "MT5ForConditionalGeneration",
- "MT5ForQuestionAnswering",
- "MT5ForSequenceClassification",
- "MT5ForTokenClassification",
- "MT5Model",
- "MT5PreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v2-1--224": {
- "*": {
- "repo": "google/mobilenet_v2_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV2Model"
- }
- },
- "tasks": [
- "MobileNetV2ForImageClassification",
- "MobileNetV2ForSemanticSegmentation",
- "MobileNetV2Model",
- "MobileNetV2PreTrainedModel"
- ]
- }
- },
- "info.vit.paligemma": {
- "*": {
- "repo": "google/paligemma2-3b-mix-224",
- "pkg": {
- "0": {
- "transformers": "PaliGemmaModel"
- }
- },
- "tasks": [
- "PaliGemmaForConditionalGeneration",
- "PaliGemmaPreTrainedModel",
- "PaliGemmaModel"
- ]
- }
- },
- "info.stst.speecht5-asr": {
- "*": {
- "repo": "microsoft/speecht5_asr",
- "pkg": {
- "0": {
- "transformers": "SpeechT5Model"
- }
- },
- "tasks": [
- "SpeechT5ForSpeechToText",
- "SpeechT5ForSpeechToSpeech",
- "SpeechT5ForTextToSpeech",
- "SpeechT5Model",
- "SpeechT5PreTrainedModel",
- "SpeechT5HifiGan"
- ]
- }
- },
- "info.art.conv-bert": {
- "*": {
- "repo": "YituTech/conv-bert-base",
- "pkg": {
- "0": {
- "transformers": "ConvBertModel"
- }
- },
- "tasks": [
- "ConvBertForMaskedLM",
- "ConvBertForMultipleChoice",
- "ConvBertForQuestionAnswering",
- "ConvBertForSequenceClassification",
- "ConvBertForTokenClassification",
- "ConvBertLayer",
- "ConvBertModel",
- "ConvBertPreTrainedModel"
- ]
- }
- },
- "info.ssm.mamba2": {
- "*": {
- "repo": "AntonV/mamba2-2.7b-hf",
- "pkg": {
- "0": {
- "transformers": "Mamba2Model"
- }
- },
- "tasks": [
- "Mamba2ForCausalLM",
- "Mamba2Model",
- "Mamba2PreTrainedModel"
- ]
- }
- },
- "info.art.c4ai-command-r-12-2024": {
- "*": {
- "repo": "CohereLabs/c4ai-command-r7b-12-2024",
- "pkg": {
- "0": {
- "transformers": "Cohere2Model"
- }
- },
- "tasks": [
- "Cohere2ForCausalLM",
- "Cohere2Model",
- "Cohere2PreTrainedModel"
- ]
- }
- },
- "info.vit.regnet-y-040": {
- "*": {
- "repo": "facebook/regnet-y-040",
- "pkg": {
- "0": {
- "transformers": "RegNetModel"
- }
- },
- "tasks": [
- "RegNetForImageClassification",
- "RegNetModel",
- "RegNetPreTrainedModel"
- ]
- }
- },
- "info.aet.xmod": {
- "*": {
- "repo": "facebook/xmod-base",
- "pkg": {
- "0": {
- "transformers": "XmodModel"
- }
- },
- "tasks": [
- "XmodForCausalLM",
- "XmodForMaskedLM",
- "XmodForMultipleChoice",
- "XmodForQuestionAnswering",
- "XmodForSequenceClassification",
- "XmodForTokenClassification",
- "XmodModel",
- "XmodPreTrainedModel"
- ]
- }
- },
- "info.stst.bart": {
- "*": {
- "repo": "facebook/bart-large",
- "pkg": {
- "0": {
- "transformers": "BartModel"
- }
- },
- "tasks": [
- "BartForCausalLM",
- "BartForConditionalGeneration",
- "BartForQuestionAnswering",
- "BartForSequenceClassification",
- "BartModel",
- "BartPreTrainedModel",
- "BartPretrainedModel",
- "PretrainedBartModel"
- ]
- }
- },
- "info.detr.table-transformer-detection": {
- "*": {
- "repo": "microsoft/table-transformer-detection",
- "pkg": {
- "0": {
- "transformers": "TableTransformerModel"
- }
- },
- "tasks": [
- "TableTransformerForObjectDetection",
- "TableTransformerModel",
- "TableTransformerPreTrainedModel"
- ]
- }
- },
- "info.vit.resnet-50": {
- "*": {
- "repo": "microsoft/resnet-50",
- "pkg": {
- "0": {
- "transformers": "ResNetModel"
- }
- },
- "tasks": [
- "ResNetForImageClassification",
- "ResNetModel",
- "ResNetPreTrainedModel",
- "ResNetBackbone"
- ]
- }
- },
- "info.art.bert-uncased": {
- "*": {
- "repo": "google-bert/bert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "QDQBertModel"
- }
- },
- "tasks": [
- "QDQBertForMaskedLM",
- "QDQBertForMultipleChoice",
- "QDQBertForNextSentencePrediction",
- "QDQBertForQuestionAnswering",
- "QDQBertForSequenceClassification",
- "QDQBertForTokenClassification",
- "QDQBertLayer",
- "QDQBertLMHeadModel",
- "QDQBertModel",
- "QDQBertPreTrainedModel"
- ]
- }
- },
- "info.stst.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralForConditionalGeneration"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.vit.pixtral": {
- "*": {
- "repo": "mistralai/Pixtral-12B-Base-2409",
- "pkg": {
- "0": {
- "transformers": "PixtralVisionModel"
- }
- },
- "tasks": [
- "PixtralVisionModel",
- "PixtralPreTrainedModel"
- ]
- }
- },
- "info.aet.whisper": {
- "*": {
- "repo": "openai/whisper-tiny",
- "pkg": {
- "0": {
- "transformers": "WhisperModel"
- }
- },
- "tasks": [
- "WhisperForCausalLM",
- "WhisperForConditionalGeneration",
- "WhisperModel",
- "WhisperPreTrainedModel",
- "WhisperForAudioClassification"
- ]
- }
- },
- "info.detr.rtdetr-r18vd": {
- "*": {
- "repo": "PekingU/rtdetr_r18vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrV2Model"
- }
- },
- "tasks": [
- "RTDetrV2Model",
- "RTDetrV2PreTrainedModel",
- "RTDetrV2ForObjectDetection"
- ]
- }
- },
- "info.aet.wavlm": {
- "*": {
- "repo": "microsoft/wavlm-base",
- "pkg": {
- "0": {
- "transformers": "WavLMModel"
- }
- },
- "tasks": [
- "WavLMForAudioFrameClassification",
- "WavLMForCTC",
- "WavLMForSequenceClassification",
- "WavLMForXVector",
- "WavLMModel",
- "WavLMPreTrainedModel"
- ]
- }
- },
- "info.vit.beit-patch16-224-pt": {
- "*": {
- "repo": "microsoft/beit-base-patch16-224-pt22k",
- "pkg": {
- "0": {
- "transformers": "BeitModel"
- }
- },
- "tasks": [
- "BeitForImageClassification",
- "BeitForMaskedImageModeling",
- "BeitForSemanticSegmentation",
- "BeitModel",
- "BeitPreTrainedModel",
- "BeitBackbone"
- ]
- }
- },
- "info.vit.blip-vqa": {
- "*": {
- "repo": "Salesforce/blip-vqa-base",
- "pkg": {
- "0": {
- "transformers": "BlipModel"
- }
- },
- "tasks": [
- "BlipModel",
- "BlipPreTrainedModel",
- "BlipForConditionalGeneration",
- "BlipForQuestionAnswering",
- "BlipVisionModel",
- "BlipTextModel",
- "BlipForImageTextRetrieval"
- ]
- }
- },
- "info.art.flaubert-uncased": {
- "*": {
- "repo": "flaubert/flaubert_base_uncased",
- "pkg": {
- "0": {
- "transformers": "FlaubertModel"
- }
- },
- "tasks": [
- "FlaubertForMultipleChoice",
- "FlaubertForQuestionAnswering",
- "FlaubertForQuestionAnsweringSimple",
- "FlaubertForSequenceClassification",
- "FlaubertForTokenClassification",
- "FlaubertModel",
- "FlaubertWithLMHeadModel",
- "FlaubertPreTrainedModel"
- ]
- }
- },
- "info.art.transfo-xl-wt103": {
- "*": {
- "repo": "transfo-xl/transfo-xl-wt103",
- "pkg": {
- "0": {
- "transformers": "TransfoXLModel"
- }
- },
- "tasks": [
- "AdaptiveEmbedding",
- "TransfoXLForSequenceClassification",
- "TransfoXLLMHeadModel",
- "TransfoXLModel",
- "TransfoXLPreTrainedModel"
- ]
- }
- },
- "info.art.ernie-45-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-0.3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5Model"
- }
- },
- "tasks": [
- "Ernie4_5ForCausalLM",
- "Ernie4_5Model",
- "Ernie4_5PreTrainedModel"
- ]
- }
- },
- "info.stst.moonshine": {
- "*": {
- "repo": "UsefulSensors/moonshine-tiny",
- "pkg": {
- "0": {
- "transformers": "MoonshineModel"
- }
- },
- "tasks": [
- "MoonshineModel",
- "MoonshinePreTrainedModel",
- "MoonshineForConditionalGeneration"
- ]
- }
- },
- "info.detr.tvp": {
- "*": {
- "repo": "Intel/tvp-base",
- "pkg": {
- "0": {
- "transformers": "TvpModel"
- }
- },
- "tasks": [
- "TvpModel",
- "TvpPreTrainedModel",
- "TvpForVideoGrounding"
- ]
- }
- },
- "info.art.biogpt": {
- "*": {
- "repo": "microsoft/biogpt",
- "pkg": {
- "0": {
- "transformers": "BioGptModel"
- }
- },
- "tasks": [
- "BioGptForCausalLM",
- "BioGptForTokenClassification",
- "BioGptForSequenceClassification",
- "BioGptModel",
- "BioGptPreTrainedModel"
- ]
- }
- },
- "info.art.opt": {
- "*": {
- "repo": "facebook/opt-350m",
- "pkg": {
- "0": {
- "transformers": "OPTModel"
- }
- },
- "tasks": [
- "OPTForCausalLM",
- "OPTModel",
- "OPTPreTrainedModel",
- "OPTForSequenceClassification",
- "OPTForQuestionAnswering"
- ]
- }
- },
- "info.vit.ijepa-vith14": {
- "*": {
- "repo": "facebook/ijepa_vith14_1k",
- "pkg": {
- "0": {
- "transformers": "IJepaModel"
- }
- },
- "tasks": [
- "IJepaPreTrainedModel",
- "IJepaModel",
- "IJepaForImageClassification"
- ]
- }
- },
- "info.vit.van": {
- "*": {
- "repo": "Visual-Attention-Network/van-base",
- "pkg": {
- "0": {
- "transformers": "VanModel"
- }
- },
- "tasks": [
- "VanForImageClassification",
- "VanModel",
- "VanPreTrainedModel"
- ]
- }
- },
- "info.vit.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2Model"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.vit.poolformer-s12": {
- "*": {
- "repo": "sail/poolformer_s12",
- "pkg": {
- "0": {
- "transformers": "PoolFormerModel"
- }
- },
- "tasks": [
- "PoolFormerForImageClassification",
- "PoolFormerModel",
- "PoolFormerPreTrainedModel"
- ]
- }
- },
- "info.stst.gptsan-japanese": {
- "*": {
- "repo": "Tanrei/GPTSAN-japanese",
- "pkg": {
- "0": {
- "transformers": "GPTSanJapaneseForConditionalGeneration"
- }
- },
- "tasks": [
- "GPTSanJapaneseForConditionalGeneration",
- "GPTSanJapaneseModel",
- "GPTSanJapanesePreTrainedModel"
- ]
- }
- },
- "info.art.ernie-4-a-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-21B-A3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5_MoeModel"
- }
- },
- "tasks": [
- "Ernie4_5_MoeForCausalLM",
- "Ernie4_5_MoeModel",
- "Ernie4_5_MoePreTrainedModel"
- ]
- }
- },
- "info.vit.swiftformer-xs": {
- "*": {
- "repo": "MBZUAI/swiftformer-xs",
- "pkg": {
- "0": {
- "transformers": "SwiftFormerModel"
- }
- },
- "tasks": [
- "SwiftFormerForImageClassification",
- "SwiftFormerModel",
- "SwiftFormerPreTrainedModel"
- ]
- }
- },
- "info.aet.dpr-question-encoder-single-nq": {
- "*": {
- "repo": "facebook/dpr-question_encoder-single-nq-base",
- "pkg": {
- "0": {
- "transformers": "DPRQuestionEncoder"
- }
- },
- "tasks": [
- "DPRContextEncoder",
- "DPRPretrainedContextEncoder",
- "DPRPreTrainedModel",
- "DPRPretrainedQuestionEncoder",
- "DPRPretrainedReader",
- "DPRQuestionEncoder",
- "DPRReader"
- ]
- }
- },
- "info.stst.mbart-cc25": {
- "*": {
- "repo": "facebook/mbart-large-cc25",
- "pkg": {
- "0": {
- "transformers": "MBartModel"
- }
- },
- "tasks": [
- "MBartForCausalLM",
- "MBartForConditionalGeneration",
- "MBartForQuestionAnswering",
- "MBartForSequenceClassification",
- "MBartModel",
- "MBartPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics": {
- "*": {
- "repo": "HuggingFaceM4/idefics-9b",
- "pkg": {
- "0": {
- "transformers": "IdeficsModel"
- }
- },
- "tasks": [
- "IdeficsForVisionText2Text",
- "IdeficsModel",
- "IdeficsPreTrainedModel"
- ]
- }
- },
- "info.art.diffllama-handcut": {
- "*": {
- "repo": "kajuma/DiffLlama-0.3B-handcut",
- "pkg": {
- "0": {
- "transformers": "DiffLlamaModel"
- }
- },
- "tasks": [
- "DiffLlamaPreTrainedModel",
- "DiffLlamaModel",
- "DiffLlamaForCausalLM",
- "DiffLlamaForSequenceClassification",
- "DiffLlamaForQuestionAnswering",
- "DiffLlamaForTokenClassification"
- ]
- }
- },
- "info.moe.doge": {
- "*": {
- "repo": "SmallDoge/Doge-320M",
- "pkg": {
- "0": {
- "transformers": "DogeModel"
- }
- },
- "tasks": [
- "DogeForCausalLM",
- "DogeModel",
- "DogePreTrainedModel",
- "DogeForSequenceClassification"
- ]
- }
- },
- "info.vit.siglip-so-patch14-384": {
- "*": {
- "repo": "google/siglip-so400m-patch14-384",
- "pkg": {
- "0": {
- "transformers": "SmolVLMVisionTransformer"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.moe.qwen3-a": {
- "*": {
- "repo": "Qwen/Qwen3-30B-A3B",
- "pkg": {
- "0": {
- "transformers": "Qwen3MoeModel"
- }
- },
- "file_256": [
- "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
- ],
- "layer_b3": [
- "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
- ],
- "layer_256": [
- "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
- ],
- "tasks": [
- "Qwen3MoeForCausalLM",
- "Qwen3MoeForQuestionAnswering",
- "Qwen3MoeModel",
- "Qwen3MoePreTrainedModel",
- "Qwen3MoeForSequenceClassification",
- "Qwen3MoeForTokenClassification"
- ]
- }
- },
- "info.vit.clip-vit-patch32": {
- "*": {
- "repo": "openai/clip-vit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModel"
- }
- },
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.data2vec-vision": {
- "*": {
- "repo": "facebook/data2vec-vision-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecVisionModel"
- }
- },
- "tasks": [
- "Data2VecVisionForImageClassification",
- "Data2VecVisionForSemanticSegmentation",
- "Data2VecVisionModel",
- "Data2VecVisionPreTrainedModel"
- ]
- }
- },
- "info.art.deberta-v2-x": {
- "*": {
- "repo": "microsoft/deberta-v2-xlarge",
- "pkg": {
- "0": {
- "transformers": "DebertaV2Model"
- }
- },
- "tasks": [
- "DebertaV2ForMaskedLM",
- "DebertaV2ForMultipleChoice",
- "DebertaV2ForQuestionAnswering",
- "DebertaV2ForSequenceClassification",
- "DebertaV2ForTokenClassification",
- "DebertaV2Model",
- "DebertaV2PreTrainedModel"
- ]
- }
- },
- "info.art.granite": {
- "*": {
- "repo": "ibm-granite/granite-3.3-2b-base",
- "pkg": {
- "0": {
- "transformers": "GraniteModel"
- }
- },
- "tasks": [
- "GraniteForCausalLM",
- "GraniteModel",
- "GranitePreTrainedModel"
- ]
- }
- },
- "info.art.aria": {
- "*": {
- "repo": "rhymes-ai/Aria",
- "pkg": {
- "0": {
- "transformers": "AriaTextModel"
- }
- },
- "tasks": [
- "AriaForConditionalGeneration",
- "AriaPreTrainedModel",
- "AriaTextPreTrainedModel",
- "AriaTextModel",
- "AriaModel",
- "AriaTextForCausalLM"
- ]
- }
- },
- "info.vit.fuyu": {
- "*": {
- "repo": "adept/fuyu-8b",
- "pkg": {
- "0": {
- "transformers": "FuyuModel"
- }
- },
- "tasks": [
- "FuyuForCausalLM",
- "FuyuPreTrainedModel",
- "FuyuModel"
- ]
- }
- },
- "info.vit.donut": {
- "*": {
- "repo": "naver-clova-ix/donut-base",
- "pkg": {
- "0": {
- "transformers": "DonutSwinModel"
- }
- },
- "tasks": [
- "DonutSwinModel",
- "DonutSwinPreTrainedModel",
- "DonutSwinForImageClassification"
- ]
- }
- },
- "info.vit.internvl3-hf": {
- "*": {
- "repo": "OpenGVLab/InternVL3-1B-hf",
- "pkg": {
- "0": {
- "transformers": "InternVLModel"
- }
- },
- "tasks": [
- "InternVLVisionPreTrainedModel",
- "InternVLVisionModel",
- "InternVLPreTrainedModel",
- "InternVLModel",
- "InternVLForConditionalGeneration"
- ]
- }
- },
- "info.vit.bit-50": {
- "*": {
- "repo": "google/bit-50",
- "pkg": {
- "0": {
- "transformers": "BitModel"
- }
- },
- "tasks": [
- "BitForImageClassification",
- "BitModel",
- "BitPreTrainedModel",
- "BitBackbone"
- ]
- }
- },
- "info.vit.convnext-224": {
- "*": {
- "repo": "facebook/convnext-tiny-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextModel"
- }
- },
- "tasks": [
- "ConvNextForImageClassification",
- "ConvNextModel",
- "ConvNextPreTrainedModel",
- "ConvNextBackbone"
- ]
- }
- },
- "info.art.fnet": {
- "*": {
- "repo": "google/fnet-base",
- "pkg": {
- "0": {
- "transformers": "FNetModel"
- }
- },
- "tasks": [
- "FNetForMaskedLM",
- "FNetForMultipleChoice",
- "FNetForNextSentencePrediction",
- "FNetForPreTraining",
- "FNetForQuestionAnswering",
- "FNetForSequenceClassification",
- "FNetForTokenClassification",
- "FNetLayer",
- "FNetModel",
- "FNetPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-onevision-qwen2-ov-hf": {
- "*": {
- "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaOnevisionModel"
- }
- },
- "tasks": [
- "LlavaOnevisionModel",
- "LlavaOnevisionForConditionalGeneration",
- "LlavaOnevisionPreTrainedModel"
- ]
- }
- },
- "info.stst.pegasus-x": {
- "*": {
- "repo": "google/pegasus-x-large",
- "pkg": {
- "0": {
- "transformers": "PegasusXModel"
- }
- },
- "tasks": [
- "PegasusXForConditionalGeneration",
- "PegasusXModel",
- "PegasusXPreTrainedModel"
- ]
- }
- },
- "info.vit.swin2sr-classicalsr-x2-64": {
- "*": {
- "repo": "caidas/swin2sr-classicalsr-x2-64",
- "pkg": {
- "0": {
- "transformers": "Swin2SRModel"
- }
- },
- "tasks": [
- "Swin2SRForImageSuperResolution",
- "Swin2SRModel",
- "Swin2SRPreTrainedModel"
- ]
- }
- },
- "info.stst.blenderbot": {
- "*": {
- "repo": "facebook/blenderbot-3B",
- "pkg": {
- "0": {
- "transformers": "BlenderbotModel"
- }
- },
- "tasks": [
- "BlenderbotForCausalLM",
- "BlenderbotForConditionalGeneration",
- "BlenderbotModel",
- "BlenderbotPreTrainedModel"
- ]
- }
- },
- "info.vit.clipseg-rd64": {
- "*": {
- "repo": "CIDAS/clipseg-rd64",
- "pkg": {
- "0": {
- "transformers": "CLIPSegModel"
- }
- },
- "tasks": [
- "CLIPSegModel",
- "CLIPSegPreTrainedModel",
- "CLIPSegTextModel",
- "CLIPSegVisionModel",
- "CLIPSegForImageSegmentation"
- ]
- }
- },
- "info.moe.jetmoe": {
- "*": {
- "repo": "jetmoe/jetmoe-8b",
- "pkg": {
- "0": {
- "transformers": "JetMoeModel"
- }
- },
- "tasks": [
- "JetMoeForCausalLM",
- "JetMoeModel",
- "JetMoePreTrainedModel",
- "JetMoeForSequenceClassification"
- ]
- }
- },
- "info.art.mobilebert-uncased": {
- "*": {
- "repo": "google/mobilebert-uncased",
- "pkg": {
- "0": {
- "transformers": "MobileBertModel"
- }
- },
- "tasks": [
- "MobileBertForMaskedLM",
- "MobileBertForMultipleChoice",
- "MobileBertForNextSentencePrediction",
- "MobileBertForPreTraining",
- "MobileBertForQuestionAnswering",
- "MobileBertForSequenceClassification",
- "MobileBertForTokenClassification",
- "MobileBertLayer",
- "MobileBertModel",
- "MobileBertPreTrainedModel"
- ]
- }
- },
- "info.vit.groupvit-gcc-yfcc": {
- "*": {
- "repo": "nvidia/groupvit-gcc-yfcc",
- "pkg": {
- "0": {
- "transformers": "GroupViTModel"
- }
- },
- "tasks": [
- "GroupViTModel",
- "GroupViTPreTrainedModel",
- "GroupViTTextModel",
- "GroupViTVisionModel"
- ]
- }
- },
- "info.aet.ibert-roberta": {
- "*": {
- "repo": "kssteven/ibert-roberta-base",
- "pkg": {
- "0": {
- "transformers": "IBertModel"
- }
- },
- "tasks": [
- "IBertForMaskedLM",
- "IBertForMultipleChoice",
- "IBertForQuestionAnswering",
- "IBertForSequenceClassification",
- "IBertForTokenClassification",
- "IBertModel",
- "IBertPreTrainedModel"
- ]
- }
- },
- "info.stst.pop2piano": {
- "*": {
- "repo": "sweetcocoa/pop2piano",
- "pkg": {
- "0": {
- "transformers": "Pop2PianoForConditionalGeneration"
- }
- },
- "tasks": [
- "Pop2PianoForConditionalGeneration",
- "Pop2PianoPreTrainedModel"
- ]
- }
- },
- "info.moe.deepseek-v3": {
- "*": {
- "repo": "bzantium/tiny-deepseek-v3",
- "pkg": {
- "0": {
- "transformers": "DeepseekV3Model"
- }
- },
- "tasks": [
- "DeepseekV3PreTrainedModel",
- "DeepseekV3Model",
- "DeepseekV3ForCausalLM"
- ]
- }
- },
- "info.art.nystromformer-512": {
- "*": {
- "repo": "uw-madison/nystromformer-512",
- "pkg": {
- "0": {
- "transformers": "NystromformerModel"
- }
- },
- "tasks": [
- "NystromformerForMaskedLM",
- "NystromformerForMultipleChoice",
- "NystromformerForQuestionAnswering",
- "NystromformerForSequenceClassification",
- "NystromformerForTokenClassification",
- "NystromformerLayer",
- "NystromformerModel",
- "NystromformerPreTrainedModel"
- ]
- }
- },
- "info.vit.sam-hq-vit-huge": {
- "*": {
- "repo": "syscv-community/sam-hq-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamHQVisionModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.aet.qwen2-audio": {
- "*": {
- "repo": "Qwen/Qwen2-Audio-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2AudioEncoder"
- }
- },
- "tasks": [
- "Qwen2AudioForConditionalGeneration",
- "Qwen2AudioPreTrainedModel",
- "Qwen2AudioEncoder"
- ]
- }
- },
- "info.aet.ernie-m--pytorch": {
- "*": {
- "repo": "susnato/ernie-m-base_pytorch",
- "pkg": {
- "0": {
- "transformers": "ErnieMModel"
- }
- },
- "tasks": [
- "ErnieMForMultipleChoice",
- "ErnieMForQuestionAnswering",
- "ErnieMForSequenceClassification",
- "ErnieMForTokenClassification",
- "ErnieMModel",
- "ErnieMPreTrainedModel",
- "ErnieMForInformationExtraction"
- ]
- }
- },
- "info.art.imagegpt": {
- "*": {
- "repo": "openai/imagegpt-small",
- "pkg": {
- "0": {
- "transformers": "ImageGPTModel"
- }
- },
- "tasks": [
- "ImageGPTForCausalImageModeling",
- "ImageGPTForImageClassification",
- "ImageGPTModel",
- "ImageGPTPreTrainedModel"
- ]
- }
- },
- "info.vit.got-ocr-2-hf": {
- "*": {
- "repo": "stepfun-ai/GOT-OCR-2.0-hf",
- "pkg": {
- "0": {
- "transformers": "GotOcr2Model"
- }
- },
- "tasks": [
- "GotOcr2PreTrainedModel",
- "GotOcr2Model",
- "GotOcr2ForConditionalGeneration"
- ]
- }
- },
- "info.art.gpt-j": {
- "*": {
- "repo": "EleutherAI/gpt-j-6B",
- "pkg": {
- "0": {
- "transformers": "GPTJModel"
- }
- },
- "tasks": [
- "GPTJForCausalLM",
- "GPTJForQuestionAnswering",
- "GPTJForSequenceClassification",
- "GPTJModel",
- "GPTJPreTrainedModel"
- ]
- }
- },
- "info.art.stablelm-4e1t": {
- "*": {
- "repo": "stabilityai/stablelm-3b-4e1t",
- "pkg": {
- "0": {
- "transformers": "StableLmModel"
- }
- },
- "tasks": [
- "StableLmForCausalLM",
- "StableLmModel",
- "StableLmPreTrainedModel",
- "StableLmForSequenceClassification",
- "StableLmForTokenClassification"
- ]
- }
- },
- "info.aet.hubert-ls960": {
- "*": {
- "repo": "facebook/hubert-base-ls960",
- "pkg": {
- "0": {
- "transformers": "HubertModel"
- }
- },
- "tasks": [
- "HubertForCTC",
- "HubertForSequenceClassification",
- "HubertModel",
- "HubertPreTrainedModel"
- ]
- }
- },
- "info.art.mpt": {
- "*": {
- "repo": "mosaicml/mpt-7b",
- "pkg": {
- "0": {
- "transformers": "MptModel"
- }
- },
- "tasks": [
- "MptForCausalLM",
- "MptModel",
- "MptPreTrainedModel",
- "MptForSequenceClassification",
- "MptForTokenClassification",
- "MptForQuestionAnswering"
- ]
- }
- },
- "info.lstm.xlstm": {
- "*": {
- "repo": "NX-AI/xLSTM-7b",
- "pkg": {
- "0": {
- "transformers": "xLSTMModel"
- }
- },
- "tasks": [
- "xLSTMForCausalLM",
- "xLSTMModel",
- "xLSTMPreTrainedModel"
- ]
- }
- },
- "info.art.xglm": {
- "*": {
- "repo": "facebook/xglm-564M",
- "pkg": {
- "0": {
- "transformers": "XGLMModel"
- }
- },
- "tasks": [
- "XGLMForCausalLM",
- "XGLMModel",
- "XGLMPreTrainedModel"
- ]
- }
- },
- "info.art.afm": {
- "*": {
- "repo": "arcee-ai/AFM-4.5B",
- "pkg": {
- "0": {
- "transformers": "ArceeModel"
- }
- },
- "tasks": [
- "ArceeForCausalLM",
- "ArceeForQuestionAnswering",
- "ArceeForSequenceClassification",
- "ArceeForTokenClassification",
- "ArceeModel",
- "ArceePreTrainedModel"
- ]
- }
- },
- "info.vit.clap-htsat-fused": {
- "*": {
- "repo": "laion/clap-htsat-fused",
- "pkg": {
- "0": {
- "transformers": "ClapModel"
- }
- },
- "file_256": [
- "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
- "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
- ],
- "layer_b3": [
- "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
- "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
- ],
- "layer_256": [
- "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
- "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
- ],
- "tasks": [
- "ClapModel",
- "ClapPreTrainedModel",
- "ClapTextModel",
- "ClapTextModelWithProjection",
- "ClapAudioModel",
- "ClapAudioModelWithProjection"
- ]
- }
- },
- "info.ssm.zamba2": {
- "*": {
- "repo": "Zyphra/Zamba2-2.7B",
- "pkg": {
- "0": {
- "transformers": "Zamba2Model"
- }
- },
- "tasks": [
- "Zamba2ForCausalLM",
- "Zamba2ForSequenceClassification",
- "Zamba2Model",
- "Zamba2PreTrainedModel"
- ]
- }
- },
- "info.vit.perception-lm": {
- "*": {
- "repo": "facebook/Perception-LM-1B",
- "pkg": {
- "0": {
- "transformers": "PerceptionLMModel"
- }
- },
- "tasks": [
- "PerceptionLMForConditionalGeneration",
- "PerceptionLMPreTrainedModel",
- "PerceptionLMModel"
- ]
- }
- },
- "info.art.xlnet-cased": {
- "*": {
- "repo": "xlnet/xlnet-large-cased",
- "pkg": {
- "0": {
- "transformers": "XLNetModel"
- }
- },
- "tasks": [
- "XLNetForMultipleChoice",
- "XLNetForQuestionAnswering",
- "XLNetForQuestionAnsweringSimple",
- "XLNetForSequenceClassification",
- "XLNetForTokenClassification",
- "XLNetLMHeadModel",
- "XLNetModel",
- "XLNetPreTrainedModel"
- ]
- }
- },
- "info.stst.nllb-moe": {
- "*": {
- "repo": "facebook/nllb-moe-54b",
- "pkg": {
- "0": {
- "transformers": "NllbMoeModel"
- }
- },
- "tasks": [
- "NllbMoeForConditionalGeneration",
- "NllbMoeModel",
- "NllbMoePreTrainedModel",
- "NllbMoeTop2Router",
- "NllbMoeSparseMLP"
- ]
- }
- },
- "info.art.jukebox-lyrics": {
- "*": {
- "repo": "openai/jukebox-1b-lyrics",
- "pkg": {
- "0": {
- "transformers": "JukeboxModel"
- }
- },
- "tasks": [
- "JukeboxModel",
- "JukeboxPreTrainedModel",
- "JukeboxVQVAE",
- "JukeboxPrior"
- ]
- }
- },
- "info.art.mistral-v0": {
- "*": {
- "repo": "mistralai/Mistral-7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MistralModel"
- }
- },
- "tasks": [
- "MistralForCausalLM",
- "MistralForQuestionAnswering",
- "MistralModel",
- "MistralPreTrainedModel",
- "MistralForSequenceClassification",
- "MistralForTokenClassification"
- ]
- }
- },
- "info.vit.colpali-v1": {
- "*": {
- "repo": "vidore/colpali-v1.2",
- "pkg": {
- "0": {
- "transformers": "ColPaliForRetrieval"
- }
- },
- "tasks": [
- "ColPaliForRetrieval",
- "ColPaliPreTrainedModel"
- ]
- }
- },
- "info.aet.xlm-roberta-xl": {
- "*": {
- "repo": "facebook/xlm-roberta-xl",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaXLModel"
- }
- },
- "tasks": [
- "XLMRobertaXLForCausalLM",
- "XLMRobertaXLForMaskedLM",
- "XLMRobertaXLForMultipleChoice",
- "XLMRobertaXLForQuestionAnswering",
- "XLMRobertaXLForSequenceClassification",
- "XLMRobertaXLForTokenClassification",
- "XLMRobertaXLModel",
- "XLMRobertaXLPreTrainedModel"
- ]
- }
- },
- "info.stst.t5": {
- "*": {
- "repo": "google-t5/t5-small",
- "pkg": {
- "0": {
- "transformers": "T5Model"
- }
- },
- "identifiers": [
- [
- 4096
- ],
- "encoder.embed_tokens.weight",
- "text_encoders.t5xxl.transformer.shared.weight",
- "t5xxl",
- "encoder.block.0.layer.1.DenseReluDense.wi.weight"
- ],
- "file_256": [
- "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
- "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
- "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
- "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
- "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
- "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
- "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
- "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
- "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
- "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
- "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
- "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
- "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
- "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
- "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
- "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
- "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
- "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
- "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
- "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
- "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
- "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
- "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
- "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
- "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
- "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
- "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
- ],
- "layer_b3": [
- "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
- "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
- "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
- "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
- "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
- "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
- "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
- "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
- "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
- "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
- "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
- "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
- "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
- "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
- "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
- "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
- "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
- "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
- "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
- "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
- "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
- "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
- "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
- "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
- "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
- "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
- "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
- "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
- "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
- "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
- ],
- "layer_256": [
- "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
- "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
- "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
- "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
- "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
- "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
- "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
- "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
- "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
- "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
- "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
- "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
- "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
- "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
- "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
- "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
- "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
- "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
- "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
- "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
- "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
- "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
- "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
- "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
- "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
- "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
- "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
- "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
- ],
- "tasks": [
- "T5EncoderModel",
- "T5ForConditionalGeneration",
- "T5Model",
- "T5PreTrainedModel",
- "T5ForQuestionAnswering",
- "T5ForSequenceClassification",
- "T5ForTokenClassification"
- ]
- }
- },
- "info.vit.mgp-str": {
- "*": {
- "repo": "alibaba-damo/mgp-str-base",
- "pkg": {
- "0": {
- "transformers": "MgpstrForSceneTextRecognition"
- }
- },
- "tasks": [
- "MgpstrModel",
- "MgpstrPreTrainedModel",
- "MgpstrForSceneTextRecognition"
- ]
- }
- },
- "info.art.visualbert-vqa-coco-pre": {
- "*": {
- "repo": "uclanlp/visualbert-vqa-coco-pre",
- "pkg": {
- "0": {
- "transformers": "VisualBertModel"
- }
- },
- "tasks": [
- "VisualBertForMultipleChoice",
- "VisualBertForPreTraining",
- "VisualBertForQuestionAnswering",
- "VisualBertForRegionToPhraseAlignment",
- "VisualBertForVisualReasoning",
- "VisualBertLayer",
- "VisualBertModel",
- "VisualBertPreTrainedModel"
- ]
- }
- },
- "info.moe.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4TextModel"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.stst.opus-mt-en-de": {
- "*": {
- "repo": "Helsinki-NLP/opus-mt-en-de",
- "pkg": {
- "0": {
- "transformers": "MarianModel"
- }
- },
- "tasks": [
- "MarianForCausalLM",
- "MarianModel",
- "MarianMTModel",
- "MarianPreTrainedModel"
- ]
- }
- },
- "info.art.glm-4-0414": {
- "*": {
- "repo": "zai-org/GLM-4-9B-0414",
- "pkg": {
- "0": {
- "transformers": "Glm4Model"
- }
- },
- "tasks": [
- "Glm4PreTrainedModel",
- "Glm4Model",
- "Glm4ForCausalLM",
- "Glm4ForSequenceClassification",
- "Glm4ForTokenClassification"
- ]
- }
- },
- "info.art.mega-wikitext": {
- "*": {
- "repo": "mnaylor/mega-base-wikitext",
- "pkg": {
- "0": {
- "transformers": "MegaModel"
- }
- },
- "tasks": [
- "MegaForCausalLM",
- "MegaForMaskedLM",
- "MegaForMultipleChoice",
- "MegaForQuestionAnswering",
- "MegaForSequenceClassification",
- "MegaForTokenClassification",
- "MegaModel",
- "MegaPreTrainedModel"
- ]
- }
- },
- "info.vit.janus": {
- "*": {
- "repo": "deepseek-community/Janus-Pro-1B",
- "pkg": {
- "0": {
- "transformers": "JanusModel"
- }
- },
- "tasks": [
- "JanusPreTrainedModel",
- "JanusForConditionalGeneration",
- "JanusModel",
- "JanusVQVAE",
- "JanusVisionModel"
- ]
- }
- },
- "info.art.roformer-chinese": {
- "*": {
- "repo": "junnyu/roformer_chinese_base",
- "pkg": {
- "0": {
- "transformers": "RoFormerModel"
- }
- },
- "tasks": [
- "RoFormerForCausalLM",
- "RoFormerForMaskedLM",
- "RoFormerForMultipleChoice",
- "RoFormerForQuestionAnswering",
- "RoFormerForSequenceClassification",
- "RoFormerForTokenClassification",
- "RoFormerLayer",
- "RoFormerModel",
- "RoFormerPreTrainedModel"
- ]
- }
- },
- "info.moe.qwen15-moe-a": {
- "*": {
- "repo": "Qwen/Qwen1.5-MoE-A2.7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2MoeModel"
- }
- },
- "tasks": [
- "Qwen2MoeForCausalLM",
- "Qwen2MoeForQuestionAnswering",
- "Qwen2MoeModel",
- "Qwen2MoePreTrainedModel",
- "Qwen2MoeForSequenceClassification",
- "Qwen2MoeForTokenClassification"
- ]
- }
- },
- "info.vit.gemma-3": {
- "*": {
- "repo": "google/gemma-3-4b-it",
- "pkg": {
- "0": {
- "transformers": "ShieldGemma2ForImageClassification"
- }
- },
- "tasks": [
- "ShieldGemma2ForImageClassification"
- ]
- }
- },
- "info.art.qwen3": {
- "*": {
- "repo": "Qwen/Qwen3-8B",
- "pkg": {
- "0": {
- "transformers": "Qwen3Model"
- }
- },
- "tasks": [
- "Qwen3ForCausalLM",
- "Qwen3ForQuestionAnswering",
- "Qwen3PreTrainedModel",
- "Qwen3Model",
- "Qwen3ForSequenceClassification",
- "Qwen3ForTokenClassification"
- ]
- }
- },
- "info.stst.qwen2-audio": {
- "*": {
- "repo": "Qwen/Qwen2-Audio-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2AudioForConditionalGeneration"
- }
- },
- "tasks": [
- "Qwen2AudioForConditionalGeneration",
- "Qwen2AudioPreTrainedModel",
- "Qwen2AudioEncoder"
- ]
- }
- },
- "info.vit.siglip2-patch16-224": {
- "*": {
- "repo": "google/siglip2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Siglip2Model"
- }
- },
- "tasks": [
- "Siglip2Model",
- "Siglip2PreTrainedModel",
- "Siglip2TextModel",
- "Siglip2VisionModel",
- "Siglip2ForImageClassification"
- ]
- }
- },
- "info.vit.mlcd-vit-bigg-patch14-336": {
- "*": {
- "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
- "pkg": {
- "0": {
- "transformers": "MLCDVisionModel"
- }
- },
- "tasks": [
- "MLCDPreTrainedModel",
- "MLCDVisionModel"
- ]
- }
- },
- "info.aet.mra-512-4": {
- "*": {
- "repo": "uw-madison/mra-base-512-4",
- "pkg": {
- "0": {
- "transformers": "MraModel"
- }
- },
- "tasks": [
- "MraForMaskedLM",
- "MraForMultipleChoice",
- "MraForQuestionAnswering",
- "MraForSequenceClassification",
- "MraForTokenClassification",
- "MraLayer",
- "MraModel",
- "MraPreTrainedModel"
- ]
- }
- },
- "info.ssm.powermoe": {
- "*": {
- "repo": "ibm-research/PowerMoE-3b",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeHybridModel"
- }
- },
- "tasks": [
- "GraniteMoeHybridForCausalLM",
- "GraniteMoeHybridModel",
- "GraniteMoeHybridPreTrainedModel"
- ]
- }
- },
- "info.vit.swinv2-patch4-window8-256": {
- "*": {
- "repo": "microsoft/swinv2-tiny-patch4-window8-256",
- "pkg": {
- "0": {
- "transformers": "Swinv2Model"
- }
- },
- "tasks": [
- "Swinv2ForImageClassification",
- "Swinv2ForMaskedImageModeling",
- "Swinv2Model",
- "Swinv2PreTrainedModel",
- "Swinv2Backbone"
- ]
- }
- },
- "info.art.tapas-finetuned-sqa": {
- "*": {
- "repo": "google/tapas-base-finetuned-sqa",
- "pkg": {
- "0": {
- "transformers": "TapasModel"
- }
- },
- "tasks": [
- "TapasForMaskedLM",
- "TapasForQuestionAnswering",
- "TapasForSequenceClassification",
- "TapasModel",
- "TapasPreTrainedModel"
- ]
- }
- },
- "info.vit.vitdet-patch16-224": {
- "*": {
- "repo": "google/vitdet-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "VitDetModel"
- }
- },
- "tasks": [
- "VitDetModel",
- "VitDetPreTrainedModel",
- "VitDetBackbone"
- ]
- }
- },
- "info.art.gpt-neox": {
- "*": {
- "repo": "EleutherAI/gpt-neox-20b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXModel"
- }
- },
- "tasks": [
- "GPTNeoXForCausalLM",
- "GPTNeoXForQuestionAnswering",
- "GPTNeoXForSequenceClassification",
- "GPTNeoXForTokenClassification",
- "GPTNeoXLayer",
- "GPTNeoXModel",
- "GPTNeoXPreTrainedModel"
- ]
- }
- },
- "info.ssm.zamba-v1": {
- "*": {
- "repo": "Zyphra/Zamba-7B-v1",
- "pkg": {
- "0": {
- "transformers": "ZambaModel"
- }
- },
- "tasks": [
- "ZambaForCausalLM",
- "ZambaForSequenceClassification",
- "ZambaModel",
- "ZambaPreTrainedModel"
- ]
- }
- },
- "info.aet.markuplm": {
- "*": {
- "repo": "microsoft/markuplm-base",
- "pkg": {
- "0": {
- "transformers": "MarkupLMModel"
- }
- },
- "tasks": [
- "MarkupLMForQuestionAnswering",
- "MarkupLMForSequenceClassification",
- "MarkupLMForTokenClassification",
- "MarkupLMModel",
- "MarkupLMPreTrainedModel"
- ]
- }
- },
- "info.art.bark": {
- "*": {
- "repo": "suno/bark",
- "pkg": {
- "0": {
- "transformers": "BarkModel"
- }
- },
- "tasks": [
- "BarkFineModel",
- "BarkSemanticModel",
- "BarkCoarseModel",
- "BarkModel",
- "BarkPreTrainedModel",
- "BarkCausalModel"
- ]
- }
- },
- "info.aet.roberta": {
- "*": {
- "repo": "FacebookAI/roberta-base",
- "pkg": {
- "0": {
- "transformers": "RobertaModel"
- }
- },
- "tasks": [
- "RobertaForCausalLM",
- "RobertaForMaskedLM",
- "RobertaForMultipleChoice",
- "RobertaForQuestionAnswering",
- "RobertaForSequenceClassification",
- "RobertaForTokenClassification",
- "RobertaModel",
- "RobertaPreTrainedModel"
- ]
- }
- },
- "info.aet.sew-d": {
- "*": {
- "repo": "asapp/sew-d-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWDModel"
- }
- },
- "tasks": [
- "SEWDForCTC",
- "SEWDForSequenceClassification",
- "SEWDModel",
- "SEWDPreTrainedModel"
- ]
- }
- },
- "info.vit.dinov2-patch16-224": {
- "*": {
- "repo": "google/dinov2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Dinov2Model"
- }
- },
- "tasks": [
- "Dinov2ForImageClassification",
- "Dinov2Model",
- "Dinov2PreTrainedModel",
- "Dinov2Backbone"
- ]
- }
- },
- "info.aet.electra-discriminator": {
- "*": {
- "repo": "google/electra-small-discriminator",
- "pkg": {
- "0": {
- "transformers": "ElectraModel"
- }
- },
- "tasks": [
- "ElectraForCausalLM",
- "ElectraForMaskedLM",
- "ElectraForMultipleChoice",
- "ElectraForPreTraining",
- "ElectraForQuestionAnswering",
- "ElectraForSequenceClassification",
- "ElectraForTokenClassification",
- "ElectraModel",
- "ElectraPreTrainedModel"
- ]
- }
- },
- "info.vit.language-perceiver": {
- "*": {
- "repo": "deepmind/language-perceiver",
- "pkg": {
- "0": {
- "transformers": "PerceiverModel"
- }
- },
- "tasks": [
- "PerceiverForImageClassificationConvProcessing",
- "PerceiverForImageClassificationFourier",
- "PerceiverForImageClassificationLearned",
- "PerceiverForMaskedLM",
- "PerceiverForMultimodalAutoencoding",
- "PerceiverForOpticalFlow",
- "PerceiverForSequenceClassification",
- "PerceiverLayer",
- "PerceiverModel",
- "PerceiverPreTrainedModel"
- ]
- }
- },
- "info.vit.glpn-kitti": {
- "*": {
- "repo": "vinvino02/glpn-kitti",
- "pkg": {
- "0": {
- "transformers": "GLPNModel"
- }
- },
- "tasks": [
- "GLPNForDepthEstimation",
- "GLPNLayer",
- "GLPNModel",
- "GLPNPreTrainedModel"
- ]
- }
- },
- "info.vit.segformer-b0-finetuned-ade-512-512": {
- "*": {
- "repo": "nvidia/segformer-b0-finetuned-ade-512-512",
- "pkg": {
- "0": {
- "transformers": "SegformerModel"
- }
- },
- "tasks": [
- "SegformerDecodeHead",
- "SegformerForImageClassification",
- "SegformerForSemanticSegmentation",
- "SegformerLayer",
- "SegformerModel",
- "SegformerPreTrainedModel"
- ]
- }
- },
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
- "*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
- "pkg": {
- "0": {
- "transformers": "MMGroundingDinoModel"
- }
- },
- "tasks": [
- "MMGroundingDinoForObjectDetection",
- "MMGroundingDinoModel",
- "MMGroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-next-video-hf": {
- "*": {
- "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextVideoModel"
- }
- },
- "tasks": [
- "LlavaNextVideoForConditionalGeneration",
- "LlavaNextVideoModel",
- "LlavaNextVideoPreTrainedModel"
- ]
- }
- },
- "info.gan.encodec": {
- "*": {
- "repo": "facebook/encodec_24khz",
- "pkg": {
- "0": {
- "transformers": "EncodecModel"
- }
- },
- "tasks": [
- "EncodecModel",
- "EncodecPreTrainedModel"
- ]
- }
- },
- "info.aet.camembert": {
- "*": {
- "repo": "almanach/camembert-base",
- "pkg": {
- "0": {
- "transformers": "CamembertModel"
- }
- },
- "tasks": [
- "CamembertForCausalLM",
- "CamembertForMaskedLM",
- "CamembertForMultipleChoice",
- "CamembertForQuestionAnswering",
- "CamembertForSequenceClassification",
- "CamembertForTokenClassification",
- "CamembertModel",
- "CamembertPreTrainedModel"
- ]
- }
- },
- "info.art.exaone-4": {
- "*": {
- "repo": "LGAI-EXAONE/EXAONE-4.0-32B",
- "pkg": {
- "0": {
- "transformers": "Exaone4Model"
- }
- },
- "tasks": [
- "Exaone4PreTrainedModel",
- "Exaone4Model",
- "Exaone4ForCausalLM",
- "Exaone4ForSequenceClassification",
- "Exaone4ForTokenClassification",
- "Exaone4ForQuestionAnswering"
- ]
- }
- },
- "info.art.bloom": {
- "*": {
- "repo": "bigscience/bloom",
- "pkg": {
- "0": {
- "transformers": "BloomModel"
- }
- },
- "tasks": [
- "BloomForCausalLM",
- "BloomModel",
- "BloomPreTrainedModel",
- "BloomForSequenceClassification",
- "BloomForTokenClassification",
- "BloomForQuestionAnswering"
- ]
- }
- },
- "info.ssm.bamba-t-hf": {
- "*": {
- "repo": "ibm-fms/Bamba-9.8b-2.2T-hf",
- "pkg": {
- "0": {
- "transformers": "BambaModel"
- }
- },
- "tasks": [
- "BambaModel",
- "BambaForCausalLM",
- "BambaPreTrainedModel"
- ]
- }
- },
- "info.vit.seggpt-vit": {
- "*": {
- "repo": "BAAI/seggpt-vit-large",
- "pkg": {
- "0": {
- "transformers": "SegGptModel"
- }
- },
- "tasks": [
- "SegGptModel",
- "SegGptPreTrainedModel",
- "SegGptForImageSegmentation"
- ]
- }
- },
- "info.art.bros-uncased": {
- "*": {
- "repo": "jinho8345/bros-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BrosModel"
- }
- },
- "tasks": [
- "BrosPreTrainedModel",
- "BrosModel",
- "BrosForTokenClassification",
- "BrosSpadeEEForTokenClassification",
- "BrosSpadeELForTokenClassification"
- ]
- }
- },
- "info.vit.mistral-3-2503": {
- "*": {
- "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
- "pkg": {
- "0": {
- "transformers": "Mistral3Model"
- }
- },
- "tasks": [
- "Mistral3Model",
- "Mistral3PreTrainedModel",
- "Mistral3ForConditionalGeneration"
- ]
- }
- },
- "info.vit.phi-4": {
- "*": {
- "repo": "microsoft/Phi-4-multimodal-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi4MultimodalModel"
- }
- },
- "file_256": [
- "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
- ],
- "layer_b3": [
- "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
- ],
- "layer_256": [
- "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
- ],
- "tasks": [
- "Phi4MultimodalAudioPreTrainedModel",
- "Phi4MultimodalAudioModel",
- "Phi4MultimodalVisionPreTrainedModel",
- "Phi4MultimodalVisionModel",
- "Phi4MultimodalPreTrainedModel",
- "Phi4MultimodalModel",
- "Phi4MultimodalForCausalLM"
- ]
- }
- },
- "info.aet.megatron-bert-uncased": {
- "*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
- "pkg": {
- "0": {
- "transformers": "MegatronBertModel"
- }
- },
- "tasks": [
- "MegatronBertForCausalLM",
- "MegatronBertForMaskedLM",
- "MegatronBertForMultipleChoice",
- "MegatronBertForNextSentencePrediction",
- "MegatronBertForPreTraining",
- "MegatronBertForQuestionAnswering",
- "MegatronBertForSequenceClassification",
- "MegatronBertForTokenClassification",
- "MegatronBertModel",
- "MegatronBertPreTrainedModel"
- ]
- }
- },
- "info.gan.levit-128s": {
- "*": {
- "repo": "facebook/levit-128S",
- "pkg": {
- "0": {
- "transformers": "LevitModel"
- }
- },
- "tasks": [
- "LevitForImageClassification",
- "LevitForImageClassificationWithTeacher",
- "LevitModel",
- "LevitPreTrainedModel"
- ]
- }
- },
- "info.art.gpt-neo": {
- "*": {
- "repo": "EleutherAI/gpt-neo-1.3B",
- "pkg": {
- "0": {
- "transformers": "GPTNeoModel"
- }
- },
- "tasks": [
- "GPTNeoForCausalLM",
- "GPTNeoForQuestionAnswering",
- "GPTNeoForSequenceClassification",
- "GPTNeoForTokenClassification",
- "GPTNeoModel",
- "GPTNeoPreTrainedModel"
- ]
- }
- },
- "info.aet.bert-for-seq-generation-l-24-bbc-encoder": {
- "*": {
- "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
- "pkg": {
- "0": {
- "transformers": "BertGenerationEncoder"
- }
- },
- "tasks": [
- "BertGenerationDecoder",
- "BertGenerationEncoder",
- "BertGenerationPreTrainedModel"
- ]
- }
- },
- "info.aet.modernbert": {
- "*": {
- "repo": "answerdotai/ModernBERT-base",
- "pkg": {
- "0": {
- "transformers": "ModernBertModel"
- }
- },
- "tasks": [
- "ModernBertModel",
- "ModernBertPreTrainedModel",
- "ModernBertForMaskedLM",
- "ModernBertForSequenceClassification",
- "ModernBertForTokenClassification",
- "ModernBertForQuestionAnswering",
- "ModernBertForMultipleChoice"
- ]
- }
- },
- "info.moe.max-text-01-hf": {
- "*": {
- "repo": "MiniMaxAI/MiniMax-Text-01-hf",
- "pkg": {
- "0": {
- "transformers": "MiniMaxModel"
- }
- },
- "tasks": [
- "MiniMaxPreTrainedModel",
- "MiniMaxModel",
- "MiniMaxForCausalLM",
- "MiniMaxForSequenceClassification",
- "MiniMaxForTokenClassification",
- "MiniMaxForQuestionAnswering"
- ]
- }
- },
- "info.vit.bridgetower": {
- "*": {
- "repo": "BridgeTower/bridgetower-base",
- "pkg": {
- "0": {
- "transformers": "BridgeTowerModel"
- }
- },
- "tasks": [
- "BridgeTowerForContrastiveLearning",
- "BridgeTowerForImageAndTextRetrieval",
- "BridgeTowerForMaskedLM",
- "BridgeTowerModel",
- "BridgeTowerPreTrainedModel"
- ]
- }
- },
- "info.vit.deit-distilled-patch16-224": {
- "*": {
- "repo": "facebook/deit-base-distilled-patch16-224",
- "pkg": {
- "0": {
- "transformers": "DeiTModel"
- }
- },
- "tasks": [
- "DeiTForImageClassification",
- "DeiTForImageClassificationWithTeacher",
- "DeiTForMaskedImageModeling",
- "DeiTModel",
- "DeiTPreTrainedModel"
- ]
- }
- },
- "info.art.phi-3": {
- "*": {
- "repo": "microsoft/Phi-3-mini-4k-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi3Model"
- }
- },
- "tasks": [
- "Phi3PreTrainedModel",
- "Phi3Model",
- "Phi3ForCausalLM",
- "Phi3ForSequenceClassification",
- "Phi3ForTokenClassification"
- ]
- }
- },
- "info.vit.colqwen2-v1-hf": {
- "*": {
- "repo": "vidore/colqwen2-v1.0-hf",
- "pkg": {
- "0": {
- "transformers": "ColQwen2ForRetrieval"
- }
- },
- "tasks": [
- "ColQwen2ForRetrieval",
- "ColQwen2PreTrainedModel"
- ]
- }
- },
- "info.aet.xlm-roberta": {
- "*": {
- "repo": "FacebookAI/xlm-roberta-base",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaModel"
- }
- },
- "tasks": [
- "XLMRobertaForCausalLM",
- "XLMRobertaForMaskedLM",
- "XLMRobertaForMultipleChoice",
- "XLMRobertaForQuestionAnswering",
- "XLMRobertaForSequenceClassification",
- "XLMRobertaForTokenClassification",
- "XLMRobertaModel",
- "XLMRobertaPreTrainedModel"
- ]
- }
- },
- "ops.precision.uint": {
- "U8": {
- "pkg": {
- "0": {
- "torch": {
- "uint8": {
- "variant": "uint8"
- }
- }
- }
- }
- },
- "U16": {
- "pkg": {
- "0": {
- "torch": {
- "uint16": {
- "variant": "uint16"
- }
- }
- }
- }
- },
- "U32": {
- "pkg": {
- "0": {
- "torch": {
- "uint32": {
- "variant": "uint32"
- }
- }
- }
- }
- },
- "U64": {
- "pkg": {
- "0": {
- "torch": {
- "uint64": {
- "variant": "uint64"
- }
- }
- }
- }
- },
- "U1": {
- "pkg": {
- "0": {
- "torch": {
- "uint1": {
- "variant": "uint1"
- }
- }
- }
- }
- },
- "U2": {
- "pkg": {
- "0": {
- "torch": {
- "uint2": {
- "variant": "uint2"
- }
- }
- }
- }
- },
- "U3": {
- "pkg": {
- "0": {
- "torch": {
- "uint3": {
- "variant": "uint3"
- }
- }
- }
- }
- },
- "U4": {
- "pkg": {
- "0": {
- "torch": {
- "uint4": {
- "variant": "uint4"
- }
- }
- }
- }
- },
- "U5": {
- "pkg": {
- "0": {
- "torch": {
- "uint5": {
- "variant": "uint5"
- }
- }
- }
- }
- },
- "U6": {
- "pkg": {
- "0": {
- "torch": {
- "uint6": {
- "variant": "uint6"
- }
- }
- }
- }
- },
- "U7": {
- "pkg": {
- "0": {
- "torch": {
- "uint7": {
- "variant": "uint7"
- }
- }
- }
- }
- }
- },
- "ops.precision.int": {
- "I8": {
- "pkg": {
- "0": {
- "torch": {
- "int8": {
- "variant": "int8"
- }
- }
- }
- }
- },
- "I16": {
- "pkg": {
- "0": {
- "torch": {
- "int16": {
- "variant": "int16"
- }
- }
- }
- }
- },
- "I32": {
- "pkg": {
- "0": {
- "torch": {
- "int32": {
- "variant": "int32"
- }
- }
- }
- }
- },
- "I64": {
- "pkg": {
- "0": {
- "torch": {
- "int64": {
- "variant": "int64"
- }
- }
- }
- }
- },
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "qint8": {
- "variant": "qint8"
- }
- }
- }
- }
- },
- "Q32": {
- "pkg": {
- "0": {
- "torch": {
- "qint32": {
- "variant": "qint32"
- }
- }
- }
- }
- },
- "I1": {
- "pkg": {
- "0": {
- "torch": {
- "int1": {
- "variant": "int1"
- }
- }
- }
- }
- },
- "I2": {
- "pkg": {
- "0": {
- "torch": {
- "int2": {
- "variant": "int2"
- }
- }
- }
- }
- },
- "I3": {
- "pkg": {
- "0": {
- "torch": {
- "int3": {
- "variant": "int3"
- }
- }
- }
- }
- },
- "I4": {
- "pkg": {
- "0": {
- "torch": {
- "int4": {
- "variant": "int4"
- }
- }
- }
- }
- },
- "I5": {
- "pkg": {
- "0": {
- "torch": {
- "int5": {
- "variant": "int5"
- }
- }
- }
- }
- },
- "I6": {
- "pkg": {
- "0": {
- "torch": {
- "int6": {
- "variant": "int6"
- }
- }
- }
- }
- },
- "I7": {
- "pkg": {
- "0": {
- "torch": {
- "int7": {
- "variant": "int7"
- }
- }
- }
- }
- }
- },
- "ops.precision.float": {
- "F16": {
- "pkg": {
- "0": {
- "torch": {
- "float16": {
- "variant": "fp16"
- }
- }
- }
- }
- },
- "F32": {
- "pkg": {
- "0": {
- "torch": {
- "float32": {
- "variant": "fp32"
- }
- }
- }
- }
- },
- "F64": {
- "pkg": {
- "0": {
- "torch": {
- "float64": {
- "variant": "fp64"
- }
- }
- }
- }
- },
- "F8_E5M2": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2": {
- "variant": "fp8_e5m2"
- }
- }
- }
- }
- },
- "F8_E4M3": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fn": {
- "variant": "fp8_e4m3fn"
- }
- }
- }
- }
- },
- "F8_E5M2FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2fnuz": {
- "variant": "fp8_e5m2fnuz"
- }
- }
- }
- }
- },
- "F8_E4M3FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fnuz": {
- "variant": "fp8_e4m3fnuz"
- }
- }
- }
- }
- },
- "F8_E8M0FNU": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e8m0fnu": {
- "variant": "fp8_e8m0fnu"
- }
- }
- }
- }
- },
- "F8_E2M1": {
- "pkg": {
- "0": {
- "torch": {
- "float4_e2m1fn_x2": {
- "variant": "fp4_e2m1fn_x2"
- }
- }
- }
- }
- }
- },
- "ops.precision.complex": {
- "C32": {
- "pkg": {
- "0": {
- "torch": {
- "complex32": {
- "variant": "complex32"
- }
- }
- }
- }
- },
- "C64": {
- "pkg": {
- "0": {
- "torch": {
- "complex64": {
- "variant": "complex64"
- }
- }
- }
- }
- },
- "C128": {
- "pkg": {
- "0": {
- "torch": {
- "complex128": {
- "variant": "complex128"
- }
- }
- }
- }
- }
- },
- "ops.precision.bool": {
- "Bbool": {
- "pkg": {
- "0": {
- "torch": {
- "bool": {
- "variant": "bool"
- }
- }
- }
- }
- }
- },
- "ops.precision.quint": {
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "quint8": {
- "variant": "quint8"
- }
- }
- }
- }
- },
- "Q4x2": {
- "pkg": {
- "0": {
- "torch": {
- "quint4x2": {
- "variant": "quint4x2"
- }
- }
- }
- }
- },
- "Q2x4": {
- "pkg": {
- "0": {
- "torch": {
- "quint2x4": {
- "variant": "quint2x4"
- }
- }
- }
- }
- }
- },
- "ops.precision.bfloat": {
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bfloat16": {
- "variant": "bf16"
- }
- }
- }
- }
- }
- },
- "ops.precision.bits": {
- "B1x8": {
- "pkg": {
- "0": {
- "torch": {
- "bits1x8": {
- "variant": "bits1x8"
- }
- }
- }
- }
- },
- "B2x4": {
- "pkg": {
- "0": {
- "torch": {
- "bits2x4": {
- "variant": "bits2x4"
- }
- }
- }
- }
- },
- "B4x2": {
- "pkg": {
- "0": {
- "torch": {
- "bits4x2": {
- "variant": "bits4x2"
- }
- }
- }
- }
- },
- "B8": {
- "pkg": {
- "0": {
- "torch": {
- "bits8": {
- "variant": "bits8"
- }
- }
- }
- }
- },
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bits16": {
- "variant": "bits16"
- }
- }
- }
- }
- }
- },
- "ops.scheduler.amused": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "AmusedScheduler",
- "module_path": "diffusers.schedulers.scheduling_amused"
- }
- }
- }
- },
- "ops.scheduler.cmstochasticiterative": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CMStochasticIterativeScheduler",
- "module_path": "diffusers.schedulers.scheduling_consistency_models"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxdpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpm_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.ddiminverse": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_inverse"
- }
- }
- }
- },
- "ops.scheduler.ddimparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim"
- }
- }
- }
- },
- "ops.scheduler.ddpmparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm"
- }
- }
- }
- },
- "ops.scheduler.ddpmwuerstchen": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMWuerstchenScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_wuerstchen"
- }
- }
- }
- },
- "ops.scheduler.deis": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DEISMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_deis_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpminverse": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep_inverse"
- }
- }
- }
- },
- "ops.scheduler.dpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpmsinglestep": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverSinglestepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_singlestep"
- }
- }
- }
- },
- "ops.scheduler.edmdpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "EDMDPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.edmeuler": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "EDMEulerScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_euler"
- }
- }
- }
- },
- "ops.scheduler.eulerancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerAncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.euler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatcheuler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchEulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchheun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchHeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchlcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchLCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_lcm"
- }
- }
- }
- },
- "ops.scheduler.heun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "HeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.ipndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "IPNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ipndm"
- }
- }
- }
- },
- "ops.scheduler.karrasve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "KarrasVeScheduler",
- "module_path": "diffusers.schedulers.deprecated.scheduling_karras_ve"
- }
- }
- }
- },
- "ops.scheduler.kdpm2ancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2AncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.kdpm2": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2DiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_discrete"
- }
- }
- }
- },
- "ops.scheduler.lcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "LCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_lcm"
- }
- }
- }
- },
- "ops.scheduler.pndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "PNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_pndm"
- }
- }
- }
- },
- "ops.scheduler.repaint": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "RePaintScheduler",
- "module_path": "diffusers.schedulers.scheduling_repaint"
- }
- }
- }
- },
- "ops.scheduler.sa": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "SASolverScheduler",
- "module_path": "diffusers.schedulers.scheduling_sasolver"
- }
- }
- }
- },
- "ops.scheduler.scm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "SCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_scm"
- }
- }
- }
- },
- "ops.scheduler.scoresdeve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "ScoreSdeVeScheduler",
- "module_path": "diffusers.schedulers.scheduling_sde_ve"
- }
- }
- }
- },
- "ops.scheduler.tcd": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "TCDScheduler",
- "module_path": "diffusers.schedulers.scheduling_tcd"
- }
- }
- }
- },
- "ops.scheduler.unclip": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "UnCLIPScheduler",
- "module_path": "diffusers.schedulers.scheduling_unclip"
- }
- }
- }
- },
- "ops.scheduler.unipc": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "UniPCMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_unipc_multistep"
- }
- }
- }
- },
- "ops.scheduler.vqdiffusion": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "VQDiffusionScheduler",
- "module_path": "diffusers.schedulers.scheduling_vq_diffusion"
- }
- }
- }
- },
- "ops.scheduler.lms": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "LMSDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_lms_discrete"
- }
- }
- }
- },
- "ops.scheduler.cosinedpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "CosineDPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_cosine_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpmsde": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverSDEScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_sde"
- }
- }
- }
- },
- "ops.scheduler.karrasdiffusion": {
- "schedulers": {
- "pkg": {
- "0": {
- "diffusers": "KarrasDiffusionSchedulers",
- "module_path": "diffusers.schedulers.scheduling_utils"
- }
- }
- }
- },
- "info.lora.dmd": {
- "stable-diffusion-xl-1": {
- "repo": "tianweiy/DMD2",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "timesteps": [
- 999,
- 749,
- 499,
- 249
- ]
- },
- "scheduler": {
- "ops.scheduler.lcm": ""
- }
- }
- },
- "file_256": [
- "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
- "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb"
- ]
- }
- },
- "info.lora.dpo": {
- "stable-diffusion-xl-1": {
- "repo": "radames/sdxl-DPO-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5,
- "num_inference_steps": 4
- },
- "scheduler": {
- "ops.scheduler.dpm": {
- "algorithm_type": "sde-dpmsolver++",
- "use_karras_sigmas": true,
- "order": 2
- }
- }
- }
- },
- "file_256": [
- "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
- "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad"
- ]
- }
- },
- "info.lora.flash": {
- "stable-diffusion-xl-1": {
- "repo": "jasperai/flash-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": "ops.scheduler.lcm"
- }
- },
- "file_256": [
- "afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"
- ]
- },
- "pixart-alpha": {
- "repo": "jasperai/flash-pixart",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"
- ]
- },
- "stable-diffusion-3": {
- "repo": "jasperai/flash-sd3",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "jasperai/flash-sd",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- },
- "file_256": [
- "99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"
- ]
- }
- },
- "info.lora.hyper": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- }
- }
- },
- "file_256": {
- "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {
- "num_inference_steps": 12
- },
- "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {
- "num_inference_steps": 8
- },
- "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "flux1-dev": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {
- "num_inference_steps": 16
- },
- "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {
- "num_inference_steps": 4
- },
- "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {
- "num_inference_steps": 8
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {
- "num_inference_steps": 12
- },
- "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {
- "num_inference_steps": 8
- },
- "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- }
- },
- "info.lora.lcm": {
- "stable-diffusion-xl-1": {
- "repo": "latent-consistency/lcm-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"
- ]
- },
- "ssd": {
- "repo": "latent-consistency/lcm-lora-ssd-1b",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"
- ]
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-VegaRT",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "gen_kwargs": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "latent-consistency/lcm-lora-sdv1-5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"
- ]
- }
- },
- "info.lora.lightning": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/SDXL-Lightning",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- }
- }
- },
- "info.lora.pcm": {
- "stable-diffusion-xl-1": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {
- "num_inference_steps": 16
- },
- "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {
- "num_inference_steps": 16
- },
- "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {
- "num_inference_steps": 4
- },
- "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {
- "num_inference_steps": 4
- },
- "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {
- "num_inference_steps": 8
- },
- "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {
- "num_inference_steps": 8
- },
- "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- },
- "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {
- "num_inference_steps": 16
- },
- "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {
- "num_inference_steps": 4
- },
- "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {
- "num_inference_steps": 8
- },
- "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {
- "num_inference_steps": 2,
- "guidance_scale": 1.2
- },
- "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- }
- }
- }
- },
- "info.lora.slam": {
- "stable-diffusion-xl-1": {
- "repo": "alimama-creative/slam-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 1
- }
- }
- },
- "file_256": [
- "22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "alimama-creative/slam-sd1.5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- }
- }
- },
- "info.lora.spo": {
- "stable-diffusion-xl-1": {
- "repo": "SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5
- }
- }
- },
- "file_256": [
- "1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"
- ]
- }
- },
- "info.lora.tcd": {
- "stable-diffusion-xl-1": {
- "repo": "h1t/TCD-SDXL-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "eta": 0.3
- },
- "scheduler": {
- "ops.scheduler.tcd": {}
- }
- }
- },
- "file_256": [
- "2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "h1t/TCD-SD15-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"
- ],
- "layer_256": [
- "e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"
- ],
- "layer_b3": [
- "90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"
- ]
- }
- },
- "info.lora.turbo": {
- "stable-diffusion-xl-1": {
- "file_256": [
- "a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"
- ]
- },
- "flux1-dev": {
- "repo": "alimama-creative/FLUX.1-Turbo-Alpha",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- },
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 8,
- "max_sequence_length": 512
- }
- }
- },
- "file_256": [
- "77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"
- ]
- },
- "stable-diffusion-3": {
- "repo": "tensorart/stable-diffusion-3.5-large-TurboX",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.flow-match": {
- "shift": 5
- }
- }
- }
- },
- "file_256": {
- "fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {
- "num_inference_steps": "8"
- }
- }
- }
- },
- "info.art.audiogen": {
- "*": {
- "repo": "facebook/audiogen-medium",
- "pkg": {
- "0": {
- "audiocraft": "models.AudioGen",
- "generation": {
- "duration": 5
- },
- "stage_2": {
- "audiocraft": ".data.audioaudio_write",
- "generation": {
- "strategy": "loudness",
- "loudness_compressor": true
- }
- }
- }
- }
- }
- },
- "info.art.parler-tts-v1": {
- "*": {
- "repo": "parler-tts/parler-tts-large-v1",
- "pkg": {
- "0": {
- "parler_tts": "ParlerTTSForConditionalGeneration",
- "generation": {
- "return_tensors": "pt"
- }
- }
- }
- }
- },
- "info.gan.snac-st": {
- "*": {
- "repo": "Zuellni/snac-24khz-ST",
- "pkg": {
- "0": {
- "snac": "SNAC"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio"
- }
- },
- "file_256": [
- "e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be",
- "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"
- ],
- "layer_256": [
- "35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300",
- "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"
- ],
- "layer_b3": [
- "18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46",
- "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"
- ]
- }
- },
- "info.gan.kokoro": {
- "*": {
- "repo": "hexgrad/Kokoro-82M",
- "pkg": {
- "0": {
- "kokoro": "KPipeline"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
- "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
- ],
- "layer_256": [
- "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
- "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b"
- ],
- "layer_b3": [
- "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
- "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d",
- "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e932dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92"
- ]
- }
- },
- "info.stst.silero-vad": {
- "*": {
- "repo": "freddyaboulton/silero-vad",
- "pkg": {
- "0": {
- "onnx": "onnx"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"
- ],
- "layer_256": [
- "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd"
- ],
- "layer_b3": [
- "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
- "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
- "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434",
- "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e"
- ]
- }
- },
- "info.stst.wav2vec2-conformer-rope-960h-ft": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rope-large-960h-ft",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerForCTC"
- }
- },
- "file_256": [
- "97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"
- ],
- "layer_256": [
- "1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"
- ],
- "layer_b3": [
- "6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
- ],
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.art.orpheus-0-ft": {
- "*": {
- "repo": "canopylabs/orpheus-3b-0.1-ft",
- "pkg": {
- "0": {
- "orpheus_tts": "OrpheusModel",
- "generation": {
- "max_model_len": 2048
- }
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.art.outetts-0": {
- "*": {
- "repo": "OuteAI/OuteTTS-0.3-1B",
- "pkg": {
- "0": {
- "outetts": "InterfaceHF"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.gan.speecht5-hifigan": {
- "*": {
- "file_256": [
- "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c"
- ],
- "layer_256": [
- "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770"
- ],
- "layer_b3": [
- "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956"
- ]
- }
- },
- "info.dit.wan2-flf2v-720p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
- "file_256": [
- "",
- ""
- ],
- "layer_256": [
- ""
- ],
- "layer_b3": [
- ""
- ]
- }
- },
- "ops.patch.hidiffusion": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "hidiffusion": {
- "apply_hidiffusion": {
- "timesteps": "StableDiffusionXLTimesteps"
- }
- },
- "generation": {
- "height": 2048,
- "width": 2048,
- "eta": 1.0,
- "guidance_scale": 7.5,
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "ops.scheduler.align-your-steps": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "diffusers": "schedulers.scheduling_utils.AysSchedules",
- "generation": {
- "timesteps": "StableDiffusionXLTimesteps",
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "info.vit.clip-vit-patch14": {
- "*": {
- "repo": "openai/clip-vit-large-patch14",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModel"
- }
- },
- "file_256": [
- "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e",
- "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17",
- "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082",
- "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3",
- "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd",
- "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1",
- "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc",
- "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae",
- "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325",
- "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5",
- "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447",
- "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9",
- "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334",
- "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e",
- "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633",
- "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
- "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1"
- ],
- "layer_256": [
- "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
- "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
- "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
- "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
- "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
- "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
- "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
- "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
- "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
- "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471",
- "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364",
- "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e",
- "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3"
- ],
- "layer_b3": [
- "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
- "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
- "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
- "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
- "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
- "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
- "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636",
- "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f",
- "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3",
- "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5",
- "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc",
- "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc",
- "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f",
- "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
- "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
- "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-g-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModelWithProjection"
- }
- },
- "file_256": [
- "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134",
- "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4",
- "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4",
- "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe",
- "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d",
- "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5",
- "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c",
- "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a",
- "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff",
- "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b"
- ],
- "layer_256": [
- "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
- "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
- "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc",
- "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3",
- "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946",
- "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a",
- "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46"
- ],
- "layer_b3": [
- "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
- "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
- "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8",
- "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2",
- "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
- "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
- "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-h-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
- "pkg": {
- "0": {
- "transformers": "CLIPModel"
- }
- },
- "file_256": [
- "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
- "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5",
- "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"
- ],
- "layer_256": [
- "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
- "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061",
- "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c"
- ],
- "layer_b3": [
- "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
- "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
- "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.aet.chatglm3": {
- "*": {
- "repo": "zai-org/chatglm3-6b",
- "pkg": {
- "0": {
- "transformers": "AutoModel"
- }
- },
- "file_256": [
- "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654",
- "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d"
- ],
- "layer_256": [
- "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44"
- ],
- "layer_b3": [
- "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481"
- ]
- }
- },
- "info.vae.tae": {
- "stable-diffusion-3": {
- "repo": "madebyollin/taesd3",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"
- ]
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/taesdxl",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "madebyollin/taesd",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"
- ]
- },
- "flux1-dev": {
- "repo": "madebyollin/taef1",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"
- ]
- }
- },
- "info.vae.kl": {
- "qwen-image": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLQwenImage"
- }
- },
- "file_256": [
- "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344"
- ],
- "layer_256": [
- "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc"
- ],
- "layer_b3": [
- "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b"
- ]
- },
- "ltx-video": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLLTXVideo"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "allegro": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLAllegro"
- }
- },
- "file_256": [
- "47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"
- ],
- "layer_256": [
- "bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"
- ],
- "layer_b3": [
- "93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"
- ]
- },
- "cosmos-1-diffusion-video2world": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCosmos"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "easyanimatev5-zh": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMagvit"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "hunyuanvideo-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLHunyuanVideo"
- }
- },
- "file_256": [
- "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818",
- "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
- "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
- "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0"
- ]
- },
- "mochi-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMochi"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "audioldm-s-v2": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"
- ],
- "layer_256": [
- "54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"
- ],
- "layer_b3": [
- "00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"
- ]
- },
- "stable-video-diffusion-img2vid-xt": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLTemporalDecoder"
- }
- }
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/sdxl-vae-fp16-fix",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c",
- "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646",
- "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48",
- "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68",
- "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341",
- "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4",
- "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74",
- "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0"
- ]
- },
- "stable-diffusion-xl-1*": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a",
- "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1",
- "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74"
- ]
- },
- "shuttle-jaguar": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904"
- ],
- "layer_256": [
- "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b"
- ],
- "layer_b3": [
- "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
- "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0"
- ]
- },
- "flux1-dev": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38",
- "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3",
- "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94",
- "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc"
- ],
- "layer_256": [
- "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
- "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216",
- "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004",
- "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80"
- ],
- "layer_b3": [
- "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17",
- "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903"
- ]
- },
- "musicldm": {
- "file_256": [
- "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4"
- ],
- "layer_256": [
- "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f"
- ],
- "layer_b3": [
- "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0"
- ]
- },
- "stable-diffusion-v1-5": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520",
- "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a",
- "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46",
- "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab",
- "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75",
- "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815",
- "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c",
- "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c"
- ],
- "layer_256": [
- "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd",
- "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3"
- ],
- "layer_b3": [
- "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e",
- "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833"
- ]
- }
- },
- "info.vae.wan": {
- "wan2-i2v-480p": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLWan",
- "precision": "ops.precision.float.F32"
- }
- },
- "file_256": [
- "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793",
- "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"
- ],
- "layer_256": [
- "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
- "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e"
- ],
- "layer_b3": [
- "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010",
- "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433"
- ]
- },
- "skyreels-v2-t2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "skyreels-v2-i2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.cogvideox": {
- "cogvideox-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCogVideoX"
- }
- },
- "file_256": [
- "a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"
- ],
- "layer_256": [
- "43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"
- ],
- "layer_b3": [
- "246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"
- ]
- },
- "cogvideox-fun-v-pose": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "consisid": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.dc": {
- "sana-1024px-bf16": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderDC"
- }
- },
- "file_256": [
- "15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"
- ],
- "layer_256": [
- "abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"
- ],
- "layer_b3": [
- "cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"
- ]
- }
- },
- "info.vae.oobleck": {
- "stable-audio-open-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderOobleck"
- }
- }
- }
- },
- "info.vae.eq": {
- "stable-diffusion-xl-1": {
- "repo": "KBlueLeaf/EQ-SDXL-VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- },
- "info.vae.ms-lc-eq": {
- "stable-diffusion-xl-1": {
- "repo": "Anzhc/MS-LC-EQ-D-VR_VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- }
+ "expected": "data"
}
\ No newline at end of file
From c94c716f5657e372c13be6ba019a1ad67393db77 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sun, 11 Jan 2026 01:25:08 -0500
Subject: [PATCH 02/16] ~huge refactoring
---
LICENSE | 4 +-
MIR.egg-info/PKG-INFO | 5 +-
MIR.egg-info/SOURCES.txt | 3 +
MIR.egg-info/requires.txt | 2 +
README.md | 6 +-
mir.json | 7819 +++++++++++++++++++++++
mir/automata.py | 230 +-
mir/config/constants.py | 127 +-
mir/config/conversion.py | 143 +-
mir/doc_parser.py | 14 +-
mir/indexers.py | 276 +-
mir/inspect/classes.py | 30 -
mir/inspect/metadata.py | 205 +-
mir/inspect/pipes.py | 17 +-
mir/inspect/tasks.py | 67 +-
mir/maid.py | 17 +-
mir/mir.json | 5026 ++++++++++++++-
mir/spec/{mir.py => __init__.py} | 0
mir/spec/docstring_patterns.json | 41 +
mir/spec/missing_params.json | 62 +
mir/spec/repo_migrations.json | 29 +
mir/spec/template.json | 17 +-
mir/tag.py | 64 +-
pyproject.toml | 2 +
tests/test_find_docstring_run.py | 5 +
tests/test_gather_diffusers_metadata.py | 10 +-
tests/test_mir_db_create_restore.py | 2 +-
tests/test_mir_tagging.py | 16 +-
tests/test_regex_constants.py | 4 +-
uv.lock | 538 +-
30 files changed, 13907 insertions(+), 874 deletions(-)
create mode 100644 mir.json
rename mir/spec/{mir.py => __init__.py} (100%)
create mode 100644 mir/spec/docstring_patterns.json
create mode 100644 mir/spec/missing_params.json
create mode 100644 mir/spec/repo_migrations.json
create mode 100644 tests/test_find_docstring_run.py
diff --git a/LICENSE b/LICENSE
index 1fe559b..eab9da3 100644
--- a/LICENSE
+++ b/LICENSE
@@ -6,10 +6,10 @@ Without limiting other conditions in the License, the grant of rights under the
For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.
-Software: zodiac
+Software: mir
License : Mozilla Public License v. 2.0
Licensor: darkshapes github.com/darkshapes
-This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
\ No newline at end of file
+This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
diff --git a/MIR.egg-info/PKG-INFO b/MIR.egg-info/PKG-INFO
index 678b0bd..d98b3d3 100644
--- a/MIR.egg-info/PKG-INFO
+++ b/MIR.egg-info/PKG-INFO
@@ -11,13 +11,14 @@ License: “Commons Clause” License Condition v1.0
For purposes of the foregoing, “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/ support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software. Any license notice or attribution required by the License must also include this Commons Clause License Condition notice.
- Software: zodiac
+ Software: mir
License : Mozilla Public License v. 2.0
Licensor: darkshapes github.com/darkshapes
This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
Project-URL: Homepage, https://github.com/darkshapes/MIR
Project-URL: Documentation, https://github.com/darkshapes/sdbx/wiki
Keywords: ML,AI,URI,schema,diffusion,LLM,identification
@@ -33,8 +34,10 @@ Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: diffusers>=0.35.2
+Requires-Dist: ftfy>=6.3.1
Requires-Dist: huggingface-hub[hf-xet]>=1.1.7
Requires-Dist: pydantic>=2.12.5
+Requires-Dist: sentencepiece>=0.2.1
Requires-Dist: tokenizers>=0.22.1
Requires-Dist: torch>=2.9.1
Requires-Dist: torchvision>=0.24.1
diff --git a/MIR.egg-info/SOURCES.txt b/MIR.egg-info/SOURCES.txt
index 23176a9..dea9843 100644
--- a/MIR.egg-info/SOURCES.txt
+++ b/MIR.egg-info/SOURCES.txt
@@ -1,3 +1,4 @@
+.env
.gitignore
CODE_OF_CONDUCT.md
LICENSE
@@ -30,6 +31,7 @@ mir/inspect/metadata.py
mir/inspect/parenting.py
mir/inspect/pipes.py
mir/inspect/tasks.py
+mir/spec/docstring_patterns.json
mir/spec/mir.py
mir/spec/modes.json
mir/spec/template.json
@@ -37,6 +39,7 @@ mir/spec/versions.json
tests/test_class_parent.py
tests/test_deconstructors_root.py
tests/test_doc_parser.py
+tests/test_find_docstring_run.py
tests/test_gather_diffusers_metadata.py
tests/test_json_io.py
tests/test_mir_db_create_restore.py
diff --git a/MIR.egg-info/requires.txt b/MIR.egg-info/requires.txt
index d9c4e5b..089ac9c 100644
--- a/MIR.egg-info/requires.txt
+++ b/MIR.egg-info/requires.txt
@@ -1,6 +1,8 @@
diffusers>=0.35.2
+ftfy>=6.3.1
huggingface-hub[hf-xet]>=1.1.7
pydantic>=2.12.5
+sentencepiece>=0.2.1
tokenizers>=0.22.1
torch>=2.9.1
torchvision>=0.24.1
diff --git a/README.md b/README.md
index e9c5b1b..d993ad2 100644
--- a/README.md
+++ b/README.md
@@ -93,14 +93,14 @@ Meant to be created by standards community, derived from code and file analysis
| ART
| Autoregressive Transformer |
| BRNN
| Bi-directional Recurrent Neural Network |
| CNN
| Convolutional Neural Network |
-| CONTROLNET
| Controlnet |
+| CONTROLNET
| ControlNet |
| DETR
| Detection Transformer |
| GAN
| Generative Adversarial Model |
-| GRU
| Gated recurrent unit |
+| GRU
| Gated Recurrent Unit |
| LORA
| Low-Rank Adaptation |
| LSTM
| Long Short-Term Memory |
| MOE
| Mixture of Experts |
-| RBM
| Restricted Boltzmann machine |
+| RBM
| Restricted Boltzmann Machine |
| RCNN
| Region-based Convolutional Neural Network |
| RESNET
| Residual Network |
| RNN
| Recurrent Neural Network |
diff --git a/mir.json b/mir.json
new file mode 100644
index 0000000..c73a611
--- /dev/null
+++ b/mir.json
@@ -0,0 +1,7819 @@
+{
+ "info.controlnet.sd-controlnet-canny": {
+ "*": {
+ "repo": "lllyasviel/sd-controlnet-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.blipdiffusion-controlnet": {
+ "*": {
+ "repo": "Salesforce/blipdiffusion-controlnet",
+ "pkg": {
+ "0": {
+ "diffusers": "BlipDiffusionControlNetPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.control-v11p-sd15-inpaint": {
+ "*": {
+ "repo": "lllyasviel/control_v11p_sd15_inpaint",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-canny-sdxl-1": {
+ "*": {
+ "repo": "diffusers/controlnet-canny-sdxl-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-depth-sdxl-1": {
+ "*": {
+ "repo": "diffusers/controlnet-depth-sdxl-1.0-small",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-union-sdxl-1": {
+ "*": {
+ "repo": "xinsir/controlnet-union-sdxl-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetUnionModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.sd3-controlnet-canny": {
+ "*": {
+ "repo": "InstantX/SD3-Controlnet-Canny",
+ "pkg": {
+ "0": {
+ "diffusers": "SD3ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.sd3-controlnet-inpainting": {
+ "*": {
+ "repo": "alimama-creative/SD3-Controlnet-Inpainting",
+ "pkg": {
+ "0": {
+ "diffusers": "SD3ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.testing-conrolnetxs-sd2-canny": {
+ "*": {
+ "repo": "UmerHA/Testing-ConrolNetXS-SD2.1-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetXSAdapter"
+ }
+ }
+ }
+ },
+ "info.controlnet.testing-conrolnetxs-sdxl-canny": {
+ "*": {
+ "repo": "UmerHA/Testing-ConrolNetXS-SDXL-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetXSAdapter"
+ }
+ }
+ }
+ },
+ "info.unet.stable-diffusion-v1-5": {
+ "*": {
+ "repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-v1-5"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "safety_checker": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ]
+ }
+ }
+ },
+ "info.unet.stable-unclip-2-1-l": {
+ "*": {
+ "repo": "fusing/stable-unclip-2-1-l",
+ "pkg": {
+ "0": {
+ "diffusers": "StableUnCLIPPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "prior_tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-unclip-2-1-l"
+ ],
+ "prior_text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "prior": [
+ "PriorTransformer"
+ ],
+ "prior_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_normalizer": [
+ "StableUnCLIPImageNormalizer"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-unclip-2-1-l"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ]
+ }
+ }
+ },
+ "info.unet.stable-diffusion-2-1-unclip": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-2-1-unclip-small",
+ "pkg": {
+ "0": {
+ "diffusers": "StableUnCLIPImg2ImgPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "image_normalizer": [
+ "StableUnCLIPImageNormalizer"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-2-1-unclip"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ]
+ }
+ }
+ },
+ "info.unet.stable-diffusion-xl-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-base-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "info.vae.eq",
+ "info.vae.ms-lc-eq",
+ "stable-diffusion-xl-1"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-1"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-1"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ },
+ "pony-diffusion": {
+ "file_256": [
+ "67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"
+ ],
+ "layer_256": [
+ "465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"
+ ],
+ "layer_b3": [
+ "bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"
+ ]
+ },
+ "pony-diffusion-turbo": {
+ "file_256": [
+ "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587",
+ "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2"
+ ],
+ "layer_256": [
+ "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
+ "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52"
+ ],
+ "layer_b3": [
+ "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
+ "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc"
+ ]
+ },
+ "animagine-xl-4": {
+ "repo": "cagliostrolab/animagine-xl-4.0",
+ "file_256": [
+ "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f",
+ "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac",
+ "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766",
+ "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0"
+ ],
+ "layer_256": [
+ "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
+ "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
+ "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
+ "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65"
+ ],
+ "layer_b3": [
+ "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
+ "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
+ "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
+ "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447"
+ ]
+ },
+ "illustrious-xl-v2": {
+ "repo": "OnomaAIResearch/Illustrious-XL-v2.0",
+ "file_256": [
+ "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331",
+ "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c",
+ "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9",
+ "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce",
+ "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd"
+ ],
+ "layer_256": [
+ "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
+ "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
+ "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
+ "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
+ "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10"
+ ],
+ "layer_b3": [
+ "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
+ "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
+ "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
+ "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
+ "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c"
+ ]
+ },
+ "playground-v2---aesthetic": {
+ "repo": "playgroundai/playground-v2.5-1024px-aesthetic",
+ "pkg": {
+ "0": {
+ "diffusers": "DiffusionPipeline",
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 50,
+ "guidance_scale": 3
+ }
+ }
+ },
+ "file_256": [
+ "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8",
+ "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992",
+ "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a",
+ "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864",
+ "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4",
+ "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a"
+ ],
+ "layer_256": [
+ "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
+ "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843",
+ "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
+        "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0", "fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e",
+ "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c"
+ ],
+ "layer_b3": [
+ "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b",
+ "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289",
+ "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc",
+ "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
+ "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
+ "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
+ ],
+ "pipe_names": {}
+ },
+ "segmind-vega": {
+ "repo": "segmind/Segmind-Vega",
+ "file_256": [
+ "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce",
+ "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1",
+ "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2"
+ ],
+ "layer_256": [
+ "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
+ "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
+ "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741"
+ ],
+ "layer_b3": [
+ "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
+ "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
+ "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1"
+ ]
+ },
+ "ssd": {
+ "repo": "segmind/SSD-1B",
+ "file_256": [
+ "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd",
+ "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d",
+ "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
+ "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca",
+ "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a"
+ ],
+ "layer_256": [
+ "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
+ "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
+ "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
+ "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
+ "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
+ ],
+ "layer_b3": [
+ "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
+ "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4",
+ "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
+ "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
+ "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
+ ]
+ }
+ },
+ "info.unet.stable-diffusion-xl-refiner-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-refiner-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLImg2ImgPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-refiner-1"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-refiner-1"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.unet.sdxl-pix2pix-768": {
+ "*": {
+ "repo": "diffusers/sdxl-instructpix2pix-768",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLInstructPix2PixPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sdxl-pix2pix-768"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "sdxl-pix2pix-768"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ }
+ }
+ },
+ "info.dit.allegro": {
+ "*": {
+ "repo": "rhymes-ai/Allegro",
+ "pkg": {
+ "0": {
+ "diffusers": "AllegroPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "allegro"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "allegro"
+ ],
+ "transformer": [
+ "AllegroTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ }
+ }
+ },
+ "info.dit.amused-512": {
+ "*": {
+ "repo": "amused/amused-512",
+ "pkg": {
+ "0": {
+ "diffusers": "AmusedInpaintPipeline"
+ }
+ },
+ "pipe_names": {
+ "vqvae": [
+ "VQModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "amused-512"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "transformer": [
+ "UVit2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.amused",
+ "scheduler"
+ ]
+ }
+ }
+ },
+ "info.lora.animatediff-motion-adapter-v1-5-2": {
+ "*": {
+ "repo": "guoyww/animatediff-motion-adapter-v1-5-2",
+ "pkg": {
+ "0": {
+ "diffusers": "AnimateDiffVideoToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.lora.animatediff-motion-adapter-sdxl": {
+ "*": {
+ "repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
+ "pkg": {
+ "0": {
+ "diffusers": "AnimateDiffSDXLPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.animatediff-sparsectrl-scribble": {
+ "*": {
+ "repo": "guoyww/animatediff-sparsectrl-scribble",
+ "pkg": {
+ "0": {
+ "diffusers": "SparseControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.animatelcm": {
+ "*": {
+ "repo": "wangfuyun/AnimateLCM",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.bria-3": {
+ "*": {
+ "repo": "briaai/BRIA-3.2",
+ "pkg": {
+ "0": {
+ "diffusers": "BriaPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "BriaTransformer2DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "bria-3"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.flux2-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.2-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "Flux2Pipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "Mistral3ForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux2-dev"
+ ],
+ "transformer": [
+ "Flux2Transformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.flux1-schnell": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-schnell",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxInpaintPipeline"
+ }
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-schnell"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-schnell"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ },
+ "shuttle-3-aesthetic": {
+ "repo": "shuttleai/shuttle-3.1-aesthetic",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
+ "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
+ ],
+ "layer_256": [
+ "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
+ "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a"
+ ],
+ "layer_b3": [
+ "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
+ "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c"
+ ]
+ },
+ "shuttle-3-diffusion": {
+ "repo": "shuttleai/shuttle-3-diffusion",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd",
+ "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755",
+ "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be",
+ "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8"
+ ],
+ "layer_256": [
+ "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
+ "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
+ "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c"
+ ],
+ "layer_b3": [
+ "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
+ "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
+ "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd"
+ ]
+ },
+ "shuttle-jaguar": {
+ "repo": "shuttleai/shuttle-jaguar",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
+ "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f"
+ ],
+ "layer_b3": [
+ "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
+ "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672"
+ ]
+ }
+ },
+ "info.controlnet.flux1-canny-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Canny-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.flux1-dev-controlnet-canny": {
+ "*": {
+ "repo": "InstantX/FLUX.1-dev-controlnet-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.flux1-dev-controlnet-canny-alpha": {
+ "*": {
+ "repo": "InstantX/FLUX.1-dev-Controlnet-Canny-alpha",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.flux1-fill-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Fill-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxFillPipeline"
+ }
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-fill-dev"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-fill-dev"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.flux1-kontext-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Kontext-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxKontextInpaintPipeline"
+ }
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-kontext-dev"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-kontext-dev"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.flux1-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxPipeline"
+ }
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-dev"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-dev"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ },
+ "mystic": {
+ "repo": "enhanceaiteam/Mystic",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 16,
+ "guidance_scale": 7.5,
+ "width": 768,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
+ "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
+ ],
+ "layer_256": [
+ "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
+ ],
+ "layer_b3": [
+ "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
+ ]
+ },
+ "flux1-lite": {
+ "repo": "freepik/flux.1-lite-8b",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ },
+ "file_256": [
+ "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
+ "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
+ ],
+ "layer_256": [
+ "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
+ ],
+ "layer_b3": [
+ "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
+ ]
+ },
+ "f-lite": {
+ "repo": "freepik/f-lite",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "f-lite-texture": {
+ "repo": "freepik/f-lite-texture",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "flux": {
+ "repo": "TencentARC/flux-mini",
+ "file_256": [
+ "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
+ ],
+ "layer_256": [
+ "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
+ ],
+ "layer_b3": [
+ "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
+ ]
+ },
+ "flex2": {
+ "repo": "ostris/Flex.2-preview",
+ "file_256": [
+ "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
+ "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
+ ],
+ "layer_256": [
+ "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
+ ],
+ "layer_b3": [
+ "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
+ ]
+ },
+ "flex1-alpha": {
+ "repo": "ostris/Flex.1-alpha",
+ "file_256": [
+ "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
+ "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
+ ],
+ "layer_256": [
+ "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
+ ],
+ "layer_b3": [
+ "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
+ ]
+ }
+ },
+ "info.dit.prx-512-t2i-sft": {
+ "*": {
+ "repo": "Photoroom/prx-512-t2i-sft",
+ "pkg": {
+ "0": {
+ "diffusers": "PRXPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "PRXTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder": [
+ "T5GemmaEncoder"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "prx-512-t2i-sft"
+ ],
+ "vae": [
+ "AutoencoderKL",
+ [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ "NoneType"
+ ]
+ }
+ }
+ },
+ "info.unet.audioldm-s-v2": {
+ "*": {
+ "repo": "cvssp/audioldm-s-full-v2",
+ "pkg": {
+ "0": {
+ "diffusers": "AudioLDMPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "ClapTextModelWithProjection"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vocoder": [
+ "SpeechT5HifiGan"
+ ]
+ }
+ }
+ },
+ "info.unet.audioldm2": {
+ "*": {
+ "repo": "cvssp/audioldm2",
+ "pkg": {
+ "0": {
+ "diffusers": "AudioLDM2Pipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "ClapModel"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel",
+ "VitsModel"
+ ],
+ "projection_model": [
+ "AudioLDM2ProjectionModel"
+ ],
+ "language_model": [
+ "info.art.gpt2",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "audioldm2"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "audioldm2"
+ ],
+ "feature_extractor": [
+ "ClapFeatureExtractor"
+ ],
+ "unet": [
+ "AudioLDM2UNet2DConditionModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vocoder": [
+ "SpeechT5HifiGan"
+ ]
+ }
+ }
+ },
+ "info.unet.blipdiffusion": {
+ "*": {
+ "repo": "Salesforce/blipdiffusion",
+ "pkg": {
+ "0": {
+ "diffusers": "BlipDiffusionPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "blipdiffusion"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ "ops.scheduler.pndm",
+ "scheduler"
+ ],
+ "qformer": [
+ "info.vit.blip2-opt",
+ "*"
+ ],
+ "image_processor": [
+ "BlipImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.chroma": {
+ "*": {
+ "repo": "lodestones/Chroma",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaPipeline"
+ }
+ },
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ },
+ "chroma1-hd": {
+ "repo": "lodestones/Chroma1-HD",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 40
+ }
+ }
+ },
+ "file_256": [
+ "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546",
+ "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7",
+ "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc"
+ ]
+ },
+ "chroma1-flash": {
+ "repo": "lodestones/Chroma1-Flash",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaPipeline",
+ "generation": {
+ "num_inference_steps": 8,
+ "guidance_scale": 1.0,
+ "num_images_per_prompt": 1
+ }
+ }
+ },
+ "file_256": [
+ "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
+ "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
+ "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
+ ],
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.chroma1-hd": {
+ "*": {
+ "repo": "lodestones/Chroma1-HD",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaImg2ImgPipeline"
+ }
+ },
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma1-hd"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.cogvideox": {
+ "*": {
+ "repo": "zai-org/CogVideoX-2b",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogvideox"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "transformer": [
+ "CogVideoXTransformer3DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.cogvideoxddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.cogvideoxdpm",
+ "scheduler"
+ ]
+ ]
+ }
+ }
+ },
+ "info.controlnet.cogvideox-fun-v-pose": {
+ "*": {
+ "repo": "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXFunControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cogvideox-i2v": {
+ "*": {
+ "repo": "zai-org/CogVideoX-5b-I2V",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogvideox-i2v"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.cogvideox",
+ "cogvideox-i2v"
+ ],
+ "transformer": [
+ "CogVideoXTransformer3DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.cogvideoxddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.cogvideoxdpm",
+ "scheduler"
+ ]
+ ]
+ }
+ }
+ },
+ "info.dit.cogview3": {
+ "*": {
+ "repo": "zai-org/CogView3-Plus-3B",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView3PlusPipeline"
+ }
+ },
+ "tasks": [
+ "CogView3PlusPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogview3"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "CogView3PlusTransformer2DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.cogvideoxddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.cogvideoxdpm",
+ "scheduler"
+ ]
+ ]
+ }
+ }
+ },
+ "info.dit.cogview4": {
+ "*": {
+ "repo": "zai-org/CogView4-6B",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView4Pipeline"
+ }
+ },
+ "tasks": [
+ "CogView4ControlPipeline",
+ "CogView4Pipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogview4"
+ ],
+ "text_encoder": [
+ "GlmModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "CogView4Transformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.controlnet.cogview4-control": {
+ "*": {
+ "repo": "zai-org/CogView4-6B-Control",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView4ControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.pre-trianed": {
+ "*": {
+ "repo": "model_id, revision=\"diffusers/base/pre-trianed",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2_5_PredictBasePipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "pre-trianed"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
+ }
+ }
+ },
+ "info.dit.cosmos-predict2-text2image": {
+ "*": {
+ "repo": "nvidia/Cosmos-Predict2-2B-Text2Image",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2TextToImagePipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-predict2-text2image"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
+ }
+ }
+ },
+ "info.dit.cosmos-predict2-video2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-Predict2-2B-Video2World",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2VideoToWorldPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-predict2-video2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
+ }
+ }
+ },
+ "info.dit.cosmos-1-diffusion-text2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
+ "pkg": {
+ "0": {
+ "diffusers": "CosmosTextToWorldPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-1-diffusion-text2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "scheduler": [
+ "ops.scheduler.edmeuler",
+ "scheduler"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
+ }
+ }
+ },
+ "info.dit.cosmos-1-diffusion-video2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World",
+ "pkg": {
+ "0": {
+ "diffusers": "CosmosVideoToWorldPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "scheduler": [
+ "ops.scheduler.edmeuler",
+ "scheduler"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
+ }
+ }
+ },
+ "info.unet.if-i-xl-v1": {
+ "*": {
+ "repo": "DeepFloyd/IF-I-XL-v1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "IFPipeline"
+ }
+ },
+ "tasks": [
+ "IFImg2ImgPipeline",
+ "IFInpaintingPipeline",
+ "IFPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "if-i-xl-v1"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpm",
+ "scheduler"
+ ]
+ }
+ }
+ },
+ "info.dit.easyanimatev5-zh": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimatePipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "easyanimatev5-zh"
+ ],
+ "text_encoder": [
+ "Qwen2VLForConditionalGeneration",
+ "BertModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "easyanimatev5-zh"
+ ],
+ "transformer": [
+ "EasyAnimateTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.controlnet.easyanimatev5-zh-control": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimateControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.easyanimatev5-zh-inp": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimateInpaintPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "easyanimatev5-zh"
+ ],
+ "text_encoder": [
+ "Qwen2VLForConditionalGeneration",
+ "BertModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "easyanimatev5-zh-inp"
+ ],
+ "transformer": [
+ "EasyAnimateTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.hidream-i1": {
+ "*": {
+ "repo": "HiDream-ai/HiDream-I1-Full",
+ "pkg": {
+ "0": {
+ "diffusers": "HiDreamImagePipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_3": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_3": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_4": [
+ "LlamaForCausalLM"
+ ],
+ "tokenizer_4": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "transformer": [
+ "HiDreamImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuandit-v1": {
+ "diffusers": {
+ "repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanDiTPipeline"
+ }
+ },
+ "tasks": [
+ "HunyuanDiTPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "BertModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuandit-v1"
+ ],
+ "transformer": [
+ "HunyuanDiT2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpm",
+ "scheduler"
+ ],
+ "safety_checker": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuandit-v1"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanvideo": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "LlamaModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo"
+ ],
+ "transformer": [
+ "HunyuanVideoTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-i2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-I2V",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideoImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "LlavaForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-i2v"
+ ],
+ "transformer": [
+ "HunyuanVideoTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-i2v"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-1-480p-t2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_t2v",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideo15Pipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "Qwen2_5_VLTextModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-t2v"
+ ],
+ "transformer": [
+ "HunyuanVideo15Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-t2v"
+ ],
+ "guider": [
+ "ClassifierFreeGuidance"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-1-480p-i2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_i2v",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideo15ImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "Qwen2_5_VLTextModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-i2v"
+ ],
+ "transformer": [
+ "HunyuanVideo15Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-i2v"
+ ],
+ "guider": [
+ "ClassifierFreeGuidance"
+ ],
+ "image_encoder": [
+ "SiglipVisionModel"
+ ],
+ "feature_extractor": [
+ "SiglipImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanimage-2": {
+ "diffusers": {
+ "repo": "hunyuanvideo-community/HunyuanImage-2.1-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanImagePipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2"
+ ],
+ "transformer": [
+ "HunyuanImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.hunyuanimage-2-refiner": {
+ "diffusers": {
+ "repo": "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanImageRefinerPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2-refiner"
+ ],
+ "transformer": [
+ "HunyuanImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.unet.kandinsky-2-1": {
+ "prior": {
+ "repo": "kandinsky-community/kandinsky-2-1-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "KandinskyPriorPipeline"
+ }
+ },
+ "tasks": [
+ "Kandinsky3Img2ImgPipeline",
+ "Kandinsky3Pipeline",
+ "KandinskyCombinedPipeline",
+ "KandinskyImg2ImgCombinedPipeline",
+ "KandinskyImg2ImgPipeline",
+ "KandinskyInpaintCombinedPipeline",
+ "KandinskyInpaintPipeline",
+ "KandinskyPipeline",
+ "KandinskyV22CombinedPipeline",
+ "KandinskyV22Img2ImgCombinedPipeline",
+ "KandinskyV22Img2ImgPipeline",
+ "KandinskyV22InpaintCombinedPipeline",
+ "KandinskyV22InpaintPipeline",
+ "KandinskyV22Pipeline"
+ ],
+ "pipe_names": {
+ "prior": [
+ "PriorTransformer"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-2-1"
+ ],
+ "scheduler": [
+ "ops.scheduler.unclip",
+ "scheduler"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.unet.kandinsky-2-2": {
+ "prior": {
+ "repo": "kandinsky-community/kandinsky-2-2-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "KandinskyPriorPipeline"
+ }
+ },
+ "tasks": [
+ "Kandinsky3Img2ImgPipeline",
+ "Kandinsky3Pipeline",
+ "KandinskyCombinedPipeline",
+ "KandinskyImg2ImgCombinedPipeline",
+ "KandinskyImg2ImgPipeline",
+ "KandinskyInpaintCombinedPipeline",
+ "KandinskyInpaintPipeline",
+ "KandinskyPipeline",
+ "KandinskyV22CombinedPipeline",
+ "KandinskyV22Img2ImgCombinedPipeline",
+ "KandinskyV22Img2ImgPipeline",
+ "KandinskyV22InpaintCombinedPipeline",
+ "KandinskyV22InpaintPipeline",
+ "KandinskyV22Pipeline"
+ ],
+ "pipe_names": {
+ "prior": [
+ "PriorTransformer"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-2-2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unclip",
+ "scheduler"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.dit.latte-1": {
+ "*": {
+ "repo": "maxin-cn/Latte-1",
+ "pkg": {
+ "0": {
+ "diffusers": "LattePipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "latte-1"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "LatteTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ }
+ }
+ },
+ "info.dit.ltx-video": {
+ "*": {
+ "repo": "Lightricks/LTX-Video",
+ "pkg": {
+ "0": {
+ "diffusers": "LTXImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "ltx-video"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ltx-video"
+ ],
+ "transformer": [
+ "LTXVideoTransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.ltx-video-09": {
+ "*": {
+ "repo": "Lightricks/LTX-Video-0.9.5",
+ "pkg": {
+ "0": {
+ "diffusers": "LTXConditionPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "ltx-video"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ltx-video-09"
+ ],
+ "transformer": [
+ "LTXVideoTransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.lumina-next-sft": {
+ "diffusers": {
+ "repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "LuminaPipeline"
+ }
+ },
+ "tasks": [
+ "Lumina2Pipeline",
+ "LuminaPipeline"
+ ],
+ "pipe_names": {
+ "transformer": [
+ "LuminaNextDiT2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "GemmaPreTrainedModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "lumina-next-sft"
+ ]
+ }
+ }
+ },
+ "info.dit.lumina-image-2": {
+ "*": {
+ "repo": "Alpha-VLLM/Lumina-Image-2.0",
+ "pkg": {
+ "0": {
+ "diffusers": "Lumina2Pipeline"
+ }
+ },
+ "tasks": [
+ "Lumina2Pipeline"
+ ],
+ "pipe_names": {
+ "transformer": [
+ "Lumina2Transformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Gemma2PreTrainedModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "lumina-image-2"
+ ]
+ }
+ },
+ "illustrious-lumina-v3": {
+ "repo": "OnomaAIResearch/Illustrious-Lumina-v0.03",
+ "file_256": [
+ "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8",
+ "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214"
+ ],
+ "layer_256": [
+ "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
+ "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828"
+ ],
+ "layer_b3": [
+ "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
+ "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c"
+ ]
+ }
+ },
+ "info.dit.longcat-image": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Image",
+ "pkg": {
+ "0": {
+ "diffusers": "LongCatImagePipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "longcat-image"
+ ],
+ "text_processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "LongCatImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.longcat-image-edit": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Image-Edit",
+ "pkg": {
+ "0": {
+ "diffusers": "LongCatImageEditPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "longcat-image-edit"
+ ],
+ "text_processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "LongCatImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.mochi-1": {
+ "*": {
+ "repo": "genmo/mochi-1-preview",
+ "pkg": {
+ "0": {
+ "diffusers": "MochiPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "mochi-1"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "mochi-1"
+ ],
+ "transformer": [
+ "MochiTransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.unet.musicldm": {
+ "*": {
+ "repo": "ucsd-reach/musicldm",
+ "pkg": {
+ "0": {
+ "diffusers": "MusicLDMPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "ClapTextModelWithProjection",
+ "ClapModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "musicldm"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vocoder": [
+ "SpeechT5HifiGan"
+ ]
+ }
+ }
+ },
+ "info.dit.omnigen-v1": {
+ "diffusers": {
+ "repo": "Shitao/OmniGen-v1-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "OmniGenPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "OmniGenTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "omnigen-v1"
+ ]
+ }
+ }
+ },
+ "info.dit.ovis-image": {
+ "*": {
+ "repo": "AIDC-AI/Ovis-Image-7B",
+ "pkg": {
+ "0": {
+ "diffusers": "OvisImagePipeline"
+ }
+ },
+ "tasks": [
+ "OvisImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Qwen3Model"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ovis-image"
+ ],
+ "transformer": [
+ "OvisImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.visualclozepipeline-384": {
+ "*": {
+ "repo": "VisualCloze/VisualClozePipeline-384",
+ "pkg": {
+ "0": {
+ "diffusers": "VisualClozeGenerationPipeline"
+ }
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "visualclozepipeline-384"
+ ],
+ "text_encoder_2": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "visualclozepipeline-384"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.pixart-xl-2-1024-ms": {
+ "*": {
+ "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
+ "pkg": {
+ "0": {
+ "diffusers": "PixArtAlphaPipeline"
+ }
+ },
+ "tasks": [
+ "PixArtAlphaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "pixart-xl-2-1024-ms"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "PixArtTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.dit.pixart-sigma-xl-2-1024-ms": {
+ "*": {
+ "repo": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
+ "pkg": {
+ "0": {
+ "diffusers": "PixArtSigmaPipeline"
+ }
+ },
+ "tasks": [
+ "PixArtAlphaPipeline",
+ "PixArtSigmaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "pixart-sigma-xl-2-1024-ms"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "PixArtTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ }
+ }
+ },
+ "info.dit.sana-1024px-bf16": {
+ "diffusers": {
+ "repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaPipeline"
+ }
+ },
+ "tasks": [
+ "SanaPAGPipeline",
+ "SanaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sana-1024px-bf16"
+ ],
+ "text_encoder": [
+ "Gemma2PreTrainedModel"
+ ],
+ "vae": [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ "transformer": [
+ "SanaTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.controlnet.sana-1024px-controlnet": {
+ "diffusers": {
+ "repo": "ishan24/Sana_600M_1024px_ControlNetPlus_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaControlNetPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.sana-sprint-1024px": {
+ "diffusers": {
+ "repo": "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaSprintPipeline"
+ }
+ },
+ "tasks": [
+ "SanaPAGPipeline",
+ "SanaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sana-sprint-1024px"
+ ],
+ "text_encoder": [
+ "Gemma2PreTrainedModel"
+ ],
+ "vae": [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ "transformer": [
+ "SanaTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.dit.sana-video": {
+ "*": {
+ "repo": "Efficient-Large-Model/SANA-Video_2B_480p_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sana-video"
+ ],
+ "text_encoder": [
+ "Gemma2PreTrainedModel"
+ ],
+ "vae": [
+ [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ]
+ ],
+ "transformer": [
+ "SanaVideoTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.unet.shap-e": {
+ "*": {
+ "repo": "openai/shap-e",
+ "pkg": {
+ "0": {
+ "diffusers": "ShapEPipeline"
+ }
+ },
+ "pipe_names": {
+ "prior": [
+ "PriorTransformer"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "shap-e"
+ ],
+ "scheduler": [
+ "ops.scheduler.heun",
+ "discrete"
+ ],
+ "shap_e_renderer": [
+ "ShapERenderer"
+ ]
+ }
+ }
+ },
+ "info.dit.stable-audio-open-1": {
+ "*": {
+ "repo": "stabilityai/stable-audio-open-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableAudioPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.oobleck",
+ "stable-audio-open-1"
+ ],
+ "text_encoder": [
+ "T5EncoderModel"
+ ],
+ "projection_model": [
+ "StableAudioProjectionModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-audio-open-1"
+ ],
+ "transformer": [
+ "StableAudioDiTModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.unet.stable-cascade": {
+ "prior": {
+ "repo": "stabilityai/stable-cascade-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "StableCascadePriorPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-cascade"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "prior": [
+ "StableCascadeUNet"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpmwuerstchen",
+ "scheduler"
+ ]
+ }
+ }
+ },
+ "info.unet.flux1-dev": {
+ "decoder": {
+ "repo": "black-forest-labs/FLUX.1-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "WuerstchenDecoderPipeline"
+ }
+ },
+ "tasks": [
+ "WuerstchenCombinedPipeline",
+ "WuerstchenDecoderPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-dev"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "decoder": [
+ "WuerstchenDiffNeXt"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpmwuerstchen",
+ "scheduler"
+ ],
+ "vqgan": [
+ "PaellaVQModel"
+ ]
+ }
+ }
+ },
+ "info.dit.auraflow": {
+ "*": {
+ "repo": "fal/AuraFlow",
+ "pkg": {
+ "0": {
+ "diffusers": "AuraFlowPipeline"
+ }
+ },
+ "tasks": [
+ "AuraFlowPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "auraflow"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "AuraFlowTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.stable-diffusion-3": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-3.5-medium",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusion3Pipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline"
+ ],
+ "pipe_names": {
+ "transformer": [
+ "SD3Transformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.tae",
+ "stable-diffusion-3"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-3"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-3"
+ ],
+ "text_encoder_3": [
+ "T5EncoderModel"
+ ],
+ "tokenizer_3": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-3"
+ ],
+ "image_encoder": [
+ "SiglipVisionModel"
+ ],
+ "feature_extractor": [
+ "SiglipImageProcessor"
+ ]
+ }
+ },
+ "stable-diffusion-3-turbo": {
+ "repo": "tensorart/stable-diffusion-3.5-medium-turbo",
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "num_inference_steps": 8,
+ "guidance_scale": 1.5,
+ "height": 1024,
+ "width": 768
+ }
+ }
+ },
+ "file_256": [
+ "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246",
+ "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d",
+ "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b",
+ "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e"
+ ],
+ "layer_256": [
+ "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
+ "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
+ "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
+ "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
+ ],
+ "layer_b3": [
+ "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48",
+ "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
+ "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
+ "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
+ ]
+ }
+ },
+ "info.unet.gligen-1-4-inpainting-text-box": {
+ "*": {
+ "repo": "masterful/gligen-1-4-inpainting-text-box",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionGLIGENPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.gligen-inpainting-text-image": {
+ "*": {
+ "repo": "anhnct/Gligen_Inpainting_Text_Image",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionGLIGENTextImagePipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-video-diffusion-img2vid-xt": {
+ "*": {
+ "repo": "stabilityai/stable-video-diffusion-img2vid-xt",
+ "pkg": {
+ "0": {
+ "diffusers": "StableVideoDiffusionPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "unet": [
+ "UNetSpatioTemporalConditionModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.unet.ldm3d-4c": {
+ "*": {
+ "repo": "Intel/ldm3d-4c",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionLDM3DPipeline"
+ }
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ldm3d-4c"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "safety_checker": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.unet.i2vgen-xl": {
+ "*": {
+ "repo": "ali-vilab/i2vgen-xl",
+ "pkg": {
+ "0": {
+ "diffusers": "I2VGenXLPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "i2vgen-xl"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ],
+ "unet": [
+ "I2VGenXLUNet"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ]
+ }
+ }
+ },
+ "info.unet.wuerstchen": {
+ "prior": {
+ "repo": "warp-ai/wuerstchen-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "WuerstchenPriorPipeline"
+ }
+ },
+ "tasks": [
+ "WuerstchenCombinedPipeline",
+ "WuerstchenDecoderPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wuerstchen"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "prior": [
+ "WuerstchenPrior"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpmwuerstchen",
+ "scheduler"
+ ]
+ }
+ }
+ },
+ "info.dit.wan2-t2v": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanPipeline"
+ }
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan2-t2v"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.wan-animate": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.2-Animate-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanAnimatePipeline"
+ }
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan-animate"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "transformer": [
+ "WanAnimateTransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.wan2-i2v-480p": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanImageToVideoPipeline"
+ }
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan2-i2v-480p"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.wan",
+ "wan2-i2v-480p"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "transformer": [
+ "WanTransformer3DModel"
+ ],
+ "transformer_2": [
+ "WanTransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.wan21-vace": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanVACEPipeline"
+ }
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan21-vace"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "transformer": [
+ "WanVACETransformer3DModel"
+ ],
+ "transformer_2": [
+ "WanVACETransformer3DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.wan21-t2v": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanVideoToVideoPipeline"
+ }
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan21-t2v"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "transformer": [
+ "WanTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.kandinsky-5-t2v-lite-sft-5s": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-T2V-Lite-sft-5s-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5T2VPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2v-lite-sft-5s"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2v-lite-sft-5s"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.kandinsky-5-i2i-lite-sft": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5I2IPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2i-lite-sft"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2i-lite-sft"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.kandinsky-5-i2v-sft-5s": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5I2VPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2v-sft-5s"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2v-sft-5s"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.kandinsky-5-t2i-lite-sft": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5T2IPipeline"
+ }
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2i-lite-sft"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2i-lite-sft"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.dit.z-image-turbo": {
+ "*": {
+ "repo": "Z-a-o/Z-Image-Turbo",
+ "pkg": {
+ "0": {
+ "diffusers": "ZImageOmniPipeline"
+ }
+ },
+ "tasks": [
+ "ZImageControlNetInpaintPipeline",
+ "ZImageControlNetPipeline",
+ "ZImageImg2ImgPipeline",
+ "ZImageOmniPipeline",
+ "ZImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "PreTrainedModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "z-image-turbo"
+ ],
+ "transformer": [
+ "ZImageTransformer2DModel"
+ ],
+ "siglip": [
+ "Siglip2VisionModel"
+ ],
+ "siglip_processor": [
+ "Siglip2ImageProcessorFast"
+ ]
+ }
+ }
+ },
+ "info.controlnet.z-image-turbo": {
+ "*": {
+ "repo": "Tongyi-MAI/Z-Image-Turbo",
+ "pkg": {
+ "0": {
+ "diffusers": "ZImageControlNetInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.skyreels-v2-t2v-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2Pipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-t2v-720p"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.dit.skyreels-v2-df-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-DF-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-df-720p"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.dit.skyreels-v2-i2v-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-I2V-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2ImageToVideoPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-i2v-720p"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "image_processor": [
+ "CLIPProcessor"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
+ }
+ }
+ },
+ "info.dit.qwen-image": {
+ "*": {
+ "repo": "Qwen/Qwen-Image",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageInpaintPipeline"
+ }
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image"
+ ],
+ "transformer": [
+ "QwenImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.controlnet.qwen-image-controlnet-union": {
+ "*": {
+ "repo": "InstantX/Qwen-Image-ControlNet-Union",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.qwen-image-controlnet-inpainting": {
+ "*": {
+ "repo": "InstantX/Qwen-Image-ControlNet-Inpainting",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.qwen-image-edit": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Edit",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageEditInpaintPipeline"
+ }
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-edit"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "QwenImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.qwen-image-edit-2509": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Edit-2509",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageEditPlusPipeline"
+ }
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-edit-2509"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "QwenImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.qwen-image-layered": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Layered",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageLayeredPipeline"
+ }
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "Qwen2_5_VLForConditionalGeneration"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-layered"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "QwenImageTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.dit.chronoedit": {
+ "diffusers": {
+ "repo": "nvidia/ChronoEdit-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "ChronoEditPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chronoedit"
+ ],
+ "text_encoder": [
+ "UMT5EncoderModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "image_processor": [
+ "CLIPImageProcessor"
+ ],
+ "transformer": [
+ "ChronoEditTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
+ "info.unet.kolors": {
+ "diffusers": {
+ "repo": "Kwai-Kolors/Kolors-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "KolorsPipeline"
+ }
+ },
+ "tasks": [
+ "KolorsImg2ImgPipeline",
+ "KolorsPAGPipeline",
+ "KolorsPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "ChatGLMModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kolors"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch14",
+ "*"
+ ],
+ "feature_extractor": [
+ "CLIPImageProcessor"
+ ]
+ }
+ }
+ },
+ "info.encoder.tokenizer": {
+ "omdet-turbo-swin-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "blip2-opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "deberta-v2-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "bert-for-seq-generation-l-24-bbc-encoder": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "20": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "21": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "22": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "wav2vec2-bert-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "nllb-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "efficient-mlm-m0-0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "xlm-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "mgp-str": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "blenderbot": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "20": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "21": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "22": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "23": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "xlm-roberta-xl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "megatron-bert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "grounding-dino": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "funnel": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "wav2vec2-conformer-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "mm-grounding-dino-o365v1-goldg-v3det": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "gpt2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ }
+ },
+ "info.detr.omdet-turbo-swin-hf": {
+ "*": {
+ "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "pkg": {
+ "0": {
+ "transformers": "OmDetTurboForObjectDetection"
+ }
+ },
+ "tasks": [
+ "OmDetTurboForObjectDetection",
+ "OmDetTurboPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2Model"
+ }
+ },
+ "tasks": [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection"
+ ]
+ }
+ },
+ "info.art.deberta-v2-x": {
+ "*": {
+ "repo": "microsoft/deberta-v2-xlarge",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaV2Model"
+ }
+ },
+ "tasks": [
+ "DebertaV2ForMaskedLM",
+ "DebertaV2ForMultipleChoice",
+ "DebertaV2ForQuestionAnswering",
+ "DebertaV2ForSequenceClassification",
+ "DebertaV2ForTokenClassification",
+ "DebertaV2Model",
+ "DebertaV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "*": {
+ "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "pkg": {
+ "0": {
+ "transformers": "ASTModel"
+ }
+ },
+ "tasks": [
+ "ASTForAudioClassification",
+ "ASTModel",
+ "ASTPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.dab-detr": {
+ "*": {
+ "repo": "IDEA-Research/dab-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DabDetrModel"
+ }
+ },
+ "tasks": [
+ "DabDetrForObjectDetection",
+ "DabDetrModel",
+ "DabDetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.sew-d": {
+ "*": {
+ "repo": "asapp/sew-d-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWDModel"
+ }
+ },
+ "tasks": [
+ "SEWDForCTC",
+ "SEWDForSequenceClassification",
+ "SEWDModel",
+ "SEWDPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "*": {
+ "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "pkg": {
+ "0": {
+ "transformers": "BertGenerationEncoder"
+ }
+ },
+ "tasks": [
+ "BertGenerationDecoder",
+ "BertGenerationEncoder",
+ "BertGenerationPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.wav2vec2-bert-rel-pos": {
+ "*": {
+ "repo": "facebook/w2v-bert-2.0",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2BertModel"
+ }
+ },
+ "tasks": [
+ "Wav2Vec2BertForAudioFrameClassification",
+ "Wav2Vec2BertForCTC",
+ "Wav2Vec2BertForSequenceClassification",
+ "Wav2Vec2BertForXVector",
+ "Wav2Vec2BertModel",
+ "Wav2Vec2BertPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.nllb-moe": {
+ "*": {
+ "repo": "facebook/nllb-moe-54b",
+ "pkg": {
+ "0": {
+ "transformers": "NllbMoeModel"
+ }
+ },
+ "tasks": [
+ "NllbMoeForConditionalGeneration",
+ "NllbMoeModel",
+ "NllbMoePreTrainedModel",
+ "NllbMoeTop2Router",
+ "NllbMoeSparseMLP"
+ ]
+ }
+ },
+ "info.art.efficient-mlm-m0-0": {
+ "*": {
+ "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaPreLayerNormModel"
+ }
+ },
+ "tasks": [
+ "RobertaPreLayerNormForCausalLM",
+ "RobertaPreLayerNormForMaskedLM",
+ "RobertaPreLayerNormForMultipleChoice",
+ "RobertaPreLayerNormForQuestionAnswering",
+ "RobertaPreLayerNormForSequenceClassification",
+ "RobertaPreLayerNormForTokenClassification",
+ "RobertaPreLayerNormModel",
+ "RobertaPreLayerNormPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.xlm-roberta": {
+ "*": {
+ "repo": "FacebookAI/xlm-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "XLMRobertaModel"
+ }
+ },
+ "tasks": [
+ "XLMRobertaForCausalLM",
+ "XLMRobertaForMaskedLM",
+ "XLMRobertaForMultipleChoice",
+ "XLMRobertaForQuestionAnswering",
+ "XLMRobertaForSequenceClassification",
+ "XLMRobertaForTokenClassification",
+ "XLMRobertaModel",
+ "XLMRobertaPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mgp-str": {
+ "*": {
+ "repo": "alibaba-damo/mgp-str-base",
+ "pkg": {
+ "0": {
+ "transformers": "MgpstrForSceneTextRecognition"
+ }
+ },
+ "tasks": [
+ "MgpstrModel",
+ "MgpstrPreTrainedModel",
+ "MgpstrForSceneTextRecognition"
+ ]
+ }
+ },
+ "info.stst.blenderbot": {
+ "*": {
+ "repo": "facebook/blenderbot_small-90M",
+ "pkg": {
+ "0": {
+ "transformers": "BlenderbotSmallModel"
+ }
+ },
+ "tasks": [
+ "BlenderbotSmallForCausalLM",
+ "BlenderbotSmallForConditionalGeneration",
+ "BlenderbotSmallModel",
+ "BlenderbotSmallPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.xlm-roberta-xl": {
+ "*": {
+ "repo": "facebook/xlm-roberta-xl",
+ "pkg": {
+ "0": {
+ "transformers": "XLMRobertaXLModel"
+ }
+ },
+ "tasks": [
+ "XLMRobertaXLForCausalLM",
+ "XLMRobertaXLForMaskedLM",
+ "XLMRobertaXLForMultipleChoice",
+ "XLMRobertaXLForQuestionAnswering",
+ "XLMRobertaXLForSequenceClassification",
+ "XLMRobertaXLForTokenClassification",
+ "XLMRobertaXLModel",
+ "XLMRobertaXLPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.megatron-bert-uncased": {
+ "*": {
+ "repo": "nvidia/megatron-bert-uncased-345m",
+ "pkg": {
+ "0": {
+ "transformers": "MegatronBertModel"
+ }
+ },
+ "tasks": [
+ "MegatronBertForCausalLM",
+ "MegatronBertForMaskedLM",
+ "MegatronBertForMultipleChoice",
+ "MegatronBertForNextSentencePrediction",
+ "MegatronBertForPreTraining",
+ "MegatronBertForQuestionAnswering",
+ "MegatronBertForSequenceClassification",
+ "MegatronBertForTokenClassification",
+ "MegatronBertModel",
+ "MegatronBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.grounding-dino": {
+ "*": {
+ "repo": "IDEA-Research/grounding-dino-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "GroundingDinoModel"
+ }
+ },
+ "tasks": [
+ "GroundingDinoForObjectDetection",
+ "GroundingDinoModel",
+ "GroundingDinoPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.table-transformer-detection": {
+ "*": {
+ "repo": "microsoft/table-transformer-detection",
+ "pkg": {
+ "0": {
+ "transformers": "TableTransformerModel"
+ }
+ },
+ "tasks": [
+ "TableTransformerForObjectDetection",
+ "TableTransformerModel",
+ "TableTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.funnel": {
+ "*": {
+ "repo": "funnel-transformer/small",
+ "pkg": {
+ "0": {
+ "transformers": "FunnelModel"
+ }
+ },
+ "tasks": [
+ "FunnelBaseModel",
+ "FunnelForMaskedLM",
+ "FunnelForMultipleChoice",
+ "FunnelForPreTraining",
+ "FunnelForQuestionAnswering",
+ "FunnelForSequenceClassification",
+ "FunnelForTokenClassification",
+ "FunnelModel",
+ "FunnelPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.unispeech-sat-100h-libri-ft": {
+ "*": {
+ "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechSatModel"
+ }
+ },
+ "tasks": [
+ "UniSpeechSatForAudioFrameClassification",
+ "UniSpeechSatForCTC",
+ "UniSpeechSatForPreTraining",
+ "UniSpeechSatForSequenceClassification",
+ "UniSpeechSatForXVector",
+ "UniSpeechSatModel",
+ "UniSpeechSatPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.wav2vec2-conformer-rel-pos": {
+ "*": {
+ "repo": "facebook/wav2vec2-conformer-rel-pos-large",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2ConformerModel"
+ }
+ },
+ "tasks": [
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "*": {
+ "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "pkg": {
+ "0": {
+ "transformers": "MMGroundingDinoModel"
+ }
+ },
+ "tasks": [
+ "MMGroundingDinoForObjectDetection",
+ "MMGroundingDinoModel",
+ "MMGroundingDinoPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.gpt2": {
+ "*": {
+ "repo": "openai-community/gpt2",
+ "pkg": {
+ "0": {
+ "transformers": "GPT2Model"
+ }
+ },
+ "tasks": [
+ "GPT2DoubleHeadsModel",
+ "GPT2ForQuestionAnswering",
+ "GPT2ForSequenceClassification",
+ "GPT2ForTokenClassification",
+ "GPT2LMHeadModel",
+ "GPT2Model",
+ "GPT2PreTrainedModel"
+ ]
+ }
+ },
+ "ops.precision.uint": {
+ "U8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint8": {
+ "variant": "uint8"
+ }
+ }
+ }
+ }
+ },
+ "U16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint16": {
+ "variant": "uint16"
+ }
+ }
+ }
+ }
+ },
+ "U32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint32": {
+ "variant": "uint32"
+ }
+ }
+ }
+ }
+ },
+ "U64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint64": {
+ "variant": "uint64"
+ }
+ }
+ }
+ }
+ },
+ "U1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint1": {
+ "variant": "uint1"
+ }
+ }
+ }
+ }
+ },
+ "U2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint2": {
+ "variant": "uint2"
+ }
+ }
+ }
+ }
+ },
+ "U3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint3": {
+ "variant": "uint3"
+ }
+ }
+ }
+ }
+ },
+ "U4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint4": {
+ "variant": "uint4"
+ }
+ }
+ }
+ }
+ },
+ "U5": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint5": {
+ "variant": "uint5"
+ }
+ }
+ }
+ }
+ },
+ "U6": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint6": {
+ "variant": "uint6"
+ }
+ }
+ }
+ }
+ },
+ "U7": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint7": {
+ "variant": "uint7"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.int": {
+ "I8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int8": {
+ "variant": "int8"
+ }
+ }
+ }
+ }
+ },
+ "I16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int16": {
+ "variant": "int16"
+ }
+ }
+ }
+ }
+ },
+ "I32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int32": {
+ "variant": "int32"
+ }
+ }
+ }
+ }
+ },
+ "I64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int64": {
+ "variant": "int64"
+ }
+ }
+ }
+ }
+ },
+ "Q8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "qint8": {
+ "variant": "qint8"
+ }
+ }
+ }
+ }
+ },
+ "Q32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "qint32": {
+ "variant": "qint32"
+ }
+ }
+ }
+ }
+ },
+ "I1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int1": {
+ "variant": "int1"
+ }
+ }
+ }
+ }
+ },
+ "I2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int2": {
+ "variant": "int2"
+ }
+ }
+ }
+ }
+ },
+ "I3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int3": {
+ "variant": "int3"
+ }
+ }
+ }
+ }
+ },
+ "I4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int4": {
+ "variant": "int4"
+ }
+ }
+ }
+ }
+ },
+ "I5": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int5": {
+ "variant": "int5"
+ }
+ }
+ }
+ }
+ },
+ "I6": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int6": {
+ "variant": "int6"
+ }
+ }
+ }
+ }
+ },
+ "I7": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int7": {
+ "variant": "int7"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.float": {
+ "F16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float16": {
+ "variant": "fp16"
+ }
+ }
+ }
+ }
+ },
+ "F32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float32": {
+ "variant": "fp32"
+ }
+ }
+ }
+ }
+ },
+ "F64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float64": {
+ "variant": "fp64"
+ }
+ }
+ }
+ }
+ },
+ "F8_E5M2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e5m2": {
+ "variant": "fp8_e5m2"
+ }
+ }
+ }
+ }
+ },
+ "F8_E4M3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e4m3fn": {
+ "variant": "fp8_e4m3fn"
+ }
+ }
+ }
+ }
+ },
+ "F8_E5M2FNUZ": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e5m2fnuz": {
+ "variant": "fp8_e5m2fnuz"
+ }
+ }
+ }
+ }
+ },
+ "F8_E4M3FNUZ": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e4m3fnuz": {
+ "variant": "fp8_e4m3fnuz"
+ }
+ }
+ }
+ }
+ },
+ "F8_E8M0FNU": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e8m0fnu": {
+ "variant": "fp8_e8m0fnu"
+ }
+ }
+ }
+ }
+ },
+ "F8_E2M1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float4_e2m1fn_x2": {
+ "variant": "fp4_e2m1fn_x2"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.complex": {
+ "C32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex32": {
+ "variant": "complex32"
+ }
+ }
+ }
+ }
+ },
+ "C64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex64": {
+ "variant": "complex64"
+ }
+ }
+ }
+ }
+ },
+ "C128": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex128": {
+ "variant": "complex128"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bool": {
+ "Bbool": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bool": {
+ "variant": "bool"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.quint": {
+ "Q8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint8": {
+ "variant": "quint8"
+ }
+ }
+ }
+ }
+ },
+ "Q4x2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint4x2": {
+ "variant": "quint4x2"
+ }
+ }
+ }
+ }
+ },
+ "Q2x4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint2x4": {
+ "variant": "quint2x4"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bfloat": {
+ "B16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bfloat16": {
+ "variant": "bf16"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bits": {
+ "B1x8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits1x8": {
+ "variant": "bits1x8"
+ }
+ }
+ }
+ }
+ },
+ "B2x4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits2x4": {
+ "variant": "bits2x4"
+ }
+ }
+ }
+ }
+ },
+ "B4x2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits4x2": {
+ "variant": "bits4x2"
+ }
+ }
+ }
+ }
+ },
+ "B8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits8": {
+ "variant": "bits8"
+ }
+ }
+ }
+ }
+ },
+ "B16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits16": {
+ "variant": "bits16"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.scheduler.amused": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "AmusedScheduler",
+ "module_path": "diffusers.schedulers.scheduling_amused"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cmstochasticiterative": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CMStochasticIterativeScheduler",
+ "module_path": "diffusers.schedulers.scheduling_consistency_models"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cogvideoxddim": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXDDIMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_cogvideox"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cogvideoxdpm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXDPMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpm_cogvideox"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddiminverse": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMInverseScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_inverse"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddimparallel": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMParallelScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_parallel"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddim": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpmparallel": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMParallelScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm_parallel"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpmwuerstchen": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMWuerstchenScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm_wuerstchen"
+ }
+ }
+ }
+ },
+ "ops.scheduler.deis": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DEISMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_deis_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpminverse": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverMultistepInverseScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep_inverse"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpm": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpmsinglestep": {
+ "solver": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverSinglestepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_singlestep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.edmdpm": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "EDMDPMSolverMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_edm_dpmsolver_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.edmeuler": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "EDMEulerScheduler",
+ "module_path": "diffusers.schedulers.scheduling_edm_euler"
+ }
+ }
+ }
+ },
+ "ops.scheduler.eulerancestral": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "EulerAncestralDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_euler_ancestral_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.euler": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "EulerDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_euler_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatcheuler": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchEulerDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_euler_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatchheun": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchHeunDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_heun_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatchlcm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchLCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_lcm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.heun": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "HeunDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_heun_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ipndm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "IPNDMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ipndm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.karrasve": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "KarrasVeScheduler",
+ "module_path": "diffusers.schedulers.deprecated.scheduling_karras_ve"
+ }
+ }
+ }
+ },
+ "ops.scheduler.kdpm2ancestral": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "KDPM2AncestralDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.kdpm2": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "KDPM2DiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_k_dpm_2_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.lcm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "LCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_lcm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.pndm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "PNDMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_pndm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.repaint": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "RePaintScheduler",
+ "module_path": "diffusers.schedulers.scheduling_repaint"
+ }
+ }
+ }
+ },
+ "ops.scheduler.sa": {
+ "solver": {
+ "pkg": {
+ "0": {
+ "diffusers": "SASolverScheduler",
+ "module_path": "diffusers.schedulers.scheduling_sasolver"
+ }
+ }
+ }
+ },
+ "ops.scheduler.scm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "SCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_scm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.scoresdeve": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "ScoreSdeVeScheduler",
+ "module_path": "diffusers.schedulers.scheduling_sde_ve"
+ }
+ }
+ }
+ },
+ "ops.scheduler.tcd": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "TCDScheduler",
+ "module_path": "diffusers.schedulers.scheduling_tcd"
+ }
+ }
+ }
+ },
+ "ops.scheduler.unclip": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "UnCLIPScheduler",
+ "module_path": "diffusers.schedulers.scheduling_unclip"
+ }
+ }
+ }
+ },
+ "ops.scheduler.unipc": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "UniPCMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_unipc_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.vqdiffusion": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "VQDiffusionScheduler",
+ "module_path": "diffusers.schedulers.scheduling_vq_diffusion"
+ }
+ }
+ }
+ },
+ "ops.scheduler.karrasdiffusion": {
+ "schedulers": {
+ "pkg": {
+ "0": {
+ "diffusers": "KarrasDiffusionSchedulers",
+ "module_path": "diffusers.schedulers.scheduling_utils"
+ }
+ }
+ }
+ },
+ "info.lora.dmd": {
+ "stable-diffusion-xl-1": {
+ "repo": "tianweiy/DMD2",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0,
+ "timesteps": [
+ 999,
+ 749,
+ 499,
+ 249
+ ]
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": ""
+ }
+ }
+ },
+ "file_256": [
+ "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
+ "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb"
+ ]
+ }
+ },
+ "info.lora.dpo": {
+ "stable-diffusion-xl-1": {
+ "repo": "radames/sdxl-DPO-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 7.5,
+ "num_inference_steps": 4
+ },
+ "scheduler": {
+ "ops.scheduler.dpm": {
+ "algorithm_type": "sde-dpmsolver++",
+ "use_karras_sigmas": true,
+ "order": 2
+ }
+ }
+ }
+ },
+ "file_256": [
+ "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
+ "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad"
+ ]
+ }
+ },
+ "info.lora.flash": {
+ "stable-diffusion-xl-1": {
+ "repo": "jasperai/flash-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "scheduler": "ops.scheduler.lcm"
+ }
+ },
+ "file_256": [
+ "afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"
+ ]
+ },
+ "pixart-alpha": {
+ "repo": "jasperai/flash-pixart",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"
+ ]
+ },
+ "stable-diffusion-3": {
+ "repo": "jasperai/flash-sd3",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "jasperai/flash-sd",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0
+ }
+ }
+ },
+ "file_256": [
+ "99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"
+ ]
+ }
+ },
+ "info.lora.hyper": {
+ "stable-diffusion-xl-1": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ }
+ }
+ },
+ "file_256": {
+ "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {
+ "num_inference_steps": 12
+ },
+ "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {
+ "num_inference_steps": 8
+ },
+ "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {
+ "num_inference_steps": 1,
+ "guidance_scale": 0.0
+ },
+ "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "flux1-dev": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ }
+ }
+ },
+ "file_256": {
+ "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-3": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ }
+ }
+ },
+ "file_256": {
+ "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {
+ "num_inference_steps": 16
+ },
+ "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {
+ "num_inference_steps": 4
+ },
+ "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {
+ "num_inference_steps": 12
+ },
+ "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {
+ "num_inference_steps": 8
+ },
+ "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {
+ "num_inference_steps": 1,
+ "guidance_scale": 0.0
+ },
+ "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ }
+ },
+ "info.lora.lcm": {
+ "stable-diffusion-xl-1": {
+ "repo": "latent-consistency/lcm-lora-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": {
+ "timestep_spacing": "trailing"
+ }
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"
+ ]
+ },
+ "ssd": {
+ "repo": "latent-consistency/lcm-lora-ssd-1b",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"
+ ]
+ },
+ "segmind-vega": {
+ "repo": "segmind/Segmind-VegaRT",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "latent-consistency/lcm-lora-sdv1-5",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"
+ ]
+ }
+ },
+ "info.lora.lightning": {
+ "stable-diffusion-xl-1": {
+ "repo": "ByteDance/SDXL-Lightning",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0
+ }
+ }
+ }
+ }
+ },
+ "info.lora.pcm": {
+ "stable-diffusion-xl-1": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {
+ "num_inference_steps": 16
+ },
+ "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {
+ "num_inference_steps": 16
+ },
+ "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {
+ "num_inference_steps": 4
+ },
+ "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {
+ "num_inference_steps": 4
+ },
+ "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {
+ "num_inference_steps": 8
+ },
+ "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {
+ "num_inference_steps": 8
+ },
+ "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ },
+ "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {
+ "num_inference_steps": 16
+ },
+ "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {
+ "num_inference_steps": 4
+ },
+ "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {
+ "num_inference_steps": 8
+ },
+ "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-3": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {
+ "num_inference_steps": 2,
+ "guidance_scale": 1.2
+ },
+ "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ },
+ "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ },
+ "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ }
+ }
+ }
+ },
+ "info.lora.slam": {
+ "stable-diffusion-xl-1": {
+ "repo": "alimama-creative/slam-lora-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": {
+ "timestep_spacing": "trailing"
+ }
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1
+ }
+ }
+ },
+ "file_256": [
+ "22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "alimama-creative/slam-sd1.5",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ }
+ }
+ },
+ "info.lora.spo": {
+ "stable-diffusion-xl-1": {
+ "repo": "SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 5.0
+ }
+ }
+ },
+ "file_256": [
+ "0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 7.5
+ }
+ }
+ },
+ "file_256": [
+ "1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"
+ ]
+ }
+ },
+ "info.lora.tcd": {
+ "stable-diffusion-xl-1": {
+ "repo": "h1t/TCD-SDXL-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0,
+ "eta": 0.3
+ },
+ "scheduler": {
+ "ops.scheduler.tcd": {}
+ }
+ }
+ },
+ "file_256": [
+ "2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "h1t/TCD-SD15-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"
+ ],
+ "layer_256": [
+ "e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"
+ ],
+ "layer_b3": [
+ "90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"
+ ]
+ }
+ },
+ "info.lora.turbo": {
+ "stable-diffusion-xl-1": {
+ "file_256": [
+ "a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"
+ ]
+ },
+ "flux1-dev": {
+ "repo": "alimama-creative/FLUX.1-Turbo-Alpha",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ },
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 8,
+ "max_sequence_length": 512
+ }
+ }
+ },
+ "file_256": [
+ "77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"
+ ]
+ },
+ "stable-diffusion-3": {
+ "repo": "tensorart/stable-diffusion-3.5-large-TurboX",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ },
+ "scheduler": {
+ "ops.scheduler.flow-match": {
+ "shift": 5
+ }
+ }
+ }
+ },
+ "file_256": {
+ "fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {
+ "num_inference_steps": 8
+ }
+ }
+ }
+ },
+ "info.art.audiogen": {
+ "*": {
+ "repo": "facebook/audiogen-medium",
+ "pkg": {
+ "0": {
+ "audiocraft": "models.AudioGen",
+ "generation": {
+ "duration": 5
+ },
+ "stage_2": {
+ "audiocraft": ".data.audio.audio_write",
+ "generation": {
+ "strategy": "loudness",
+ "loudness_compressor": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "info.art.parler-tts-v1": {
+ "*": {
+ "repo": "parler-tts/parler-tts-large-v1",
+ "pkg": {
+ "0": {
+ "parler_tts": "ParlerTTSForConditionalGeneration",
+ "generation": {
+ "return_tensors": "pt"
+ }
+ }
+ }
+ }
+ },
+ "info.gan.snac-st": {
+ "*": {
+ "repo": "Zuellni/snac-24khz-ST",
+ "pkg": {
+ "0": {
+ "snac": "SNAC"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio"
+ }
+ },
+ "file_256": [
+ "e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be",
+ "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"
+ ],
+ "layer_256": [
+ "35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300",
+ "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"
+ ],
+ "layer_b3": [
+ "18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46",
+ "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"
+ ]
+ }
+ },
+ "info.gan.kokoro": {
+ "*": {
+ "repo": "hexgrad/Kokoro-82M",
+ "pkg": {
+ "0": {
+ "kokoro": "KPipeline"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ },
+ "file_256": [
+ "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
+ "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
+ ],
+ "layer_256": [
+ "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
+ "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b"
+ ],
+ "layer_b3": [
+ "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
+ "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d",
+ "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e932dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92"
+ ]
+ }
+ },
+ "info.stst.silero-vad": {
+ "*": {
+ "repo": "freddyaboulton/silero-vad",
+ "pkg": {
+ "0": {
+ "onnx": "onnx"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ },
+ "file_256": [
+ "591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"
+ ],
+ "layer_256": [
+ "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd"
+ ],
+ "layer_b3": [
+ "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
+ "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
+ "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434",
+ "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e"
+ ]
+ }
+ },
+ "info.stst.wav2vec2-conformer-rope-960h-ft": {
+ "*": {
+ "repo": "facebook/wav2vec2-conformer-rope-large-960h-ft",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2ConformerForCTC"
+ }
+ },
+ "file_256": [
+ "97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"
+ ],
+ "layer_256": [
+ "1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"
+ ],
+ "layer_b3": [
+ "6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
+ ],
+ "tasks": [
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.orpheus-0-ft": {
+ "*": {
+ "repo": "canopylabs/orpheus-3b-0.1-ft",
+ "pkg": {
+ "0": {
+ "orpheus_tts": "OrpheusModel",
+ "generation": {
+ "max_model_len": 2048
+ }
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ }
+ }
+ },
+ "info.art.outetts-0": {
+ "*": {
+ "repo": "OuteAI/OuteTTS-0.3-1B",
+ "pkg": {
+ "0": {
+ "outetts": "InterfaceHF"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ }
+ }
+ },
+ "info.gan.speecht5-hifigan": {
+ "*": {
+ "file_256": [
+ "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c"
+ ],
+ "layer_256": [
+ "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770"
+ ],
+ "layer_b3": [
+ "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956"
+ ]
+ }
+ },
+ "info.dit.wan2-flf2v-720p": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
+ "file_256": [
+ "",
+ ""
+ ],
+ "layer_256": [
+ ""
+ ],
+ "layer_b3": [
+ ""
+ ]
+ }
+ },
+ "ops.patch.hidiffusion": {
+ "stable-diffusion-xl-1": {
+ "pkg": {
+ "0": {
+ "hidiffusion": {
+ "apply_hidiffusion": {
+ "timesteps": "StableDiffusionXLTimesteps"
+ }
+ },
+ "generation": {
+ "height": 2048,
+ "width": 2048,
+ "eta": 1.0,
+ "guidance_scale": 7.5,
+ "num_inference_steps": 10
+ }
+ }
+ }
+ }
+ },
+ "ops.scheduler.align-your-steps": {
+ "stable-diffusion-xl-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "schedulers.scheduling_utils.AysSchedules",
+ "generation": {
+ "timesteps": "StableDiffusionXLTimesteps",
+ "num_inference_steps": 10
+ }
+ }
+ }
+ }
+ },
+ "info.art.chameleon": {
+ "lumina-mgpt-1024": {
+ "repo": "Alpha-VLLM/Lumina-mGPT-7B-1024",
+ "pkg": {
+ "0": {
+ "inference_solver": {
+ "FlexARInferenceSolver": {
+ "precision": "bf16",
+ "target_size": 768
+ }
+ },
+ "generation": {
+ "images": [],
+ "qas": [
+ [
+ "q1",
+ null
+ ]
+ ],
+ "max_gen_len": 8192,
+ "temperature": 1.0
+ }
+ },
+ "1": {
+ "inference_solver": "ChameleonXLLMXForConditionalGeneration"
+ }
+ },
+ "file_256": [
+ "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
+ "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec",
+ "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5",
+ "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b"
+ ],
+ "layer_256": [
+ "eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"
+ ],
+ "layer_b3": [
+ "6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"
+ ]
+ }
+ },
+ "info.vit.clip-vit-patch14": {
+ "*": {
+ "repo": "openai/clip-vit-large-patch14",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPTextModel"
+ }
+ },
+ "file_256": [
+ "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e",
+ "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17",
+ "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082",
+ "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3",
+ "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd",
+ "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1",
+ "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc",
+ "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae",
+ "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325",
+ "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5",
+ "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447",
+ "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9",
+ "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c",
+ "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
+ "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334",
+ "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e",
+ "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633",
+ "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
+ "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1"
+ ],
+ "layer_256": [
+ "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
+ "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
+ "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
+ "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
+ "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
+ "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
+ "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
+ "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
+ "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
+ "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
+ "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
+ "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471",
+ "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364",
+ "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e",
+ "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3"
+ ],
+ "layer_b3": [
+ "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
+ "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
+ "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
+ "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
+ "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
+ "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
+ "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636",
+ "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f",
+ "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3",
+ "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5",
+ "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc",
+ "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc",
+ "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f",
+ "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
+ "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
+ "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
+ ]
+ }
+ },
+ "info.vit.clip-vit-g-14-laion-s-b": {
+ "*": {
+ "repo": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPTextModelWithProjection"
+ }
+ },
+ "file_256": [
+ "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134",
+ "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4",
+ "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4",
+ "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe",
+ "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d",
+ "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5",
+ "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c",
+ "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a",
+ "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff",
+ "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b"
+ ],
+ "layer_256": [
+ "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
+ "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
+ "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc",
+ "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3",
+ "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946",
+ "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a",
+ "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46"
+ ],
+ "layer_b3": [
+ "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
+ "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
+ "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8",
+ "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2",
+ "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
+ "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
+ "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
+ ]
+ }
+ },
+ "info.vit.clip-vit-h-14-laion-s-b": {
+ "*": {
+ "repo": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPModel"
+ }
+ },
+ "file_256": [
+ "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
+ "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5",
+ "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"
+ ],
+ "layer_256": [
+ "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
+ "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061",
+ "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c"
+ ],
+ "layer_b3": [
+ "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
+ "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
+ "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
+ ]
+ }
+ },
+ "info.aet.chatglm3": {
+ "*": {
+ "repo": "zai-org/chatglm3-6b",
+ "pkg": {
+ "0": {
+ "transformers": "AutoModel"
+ }
+ },
+ "file_256": [
+ "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654",
+ "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d"
+ ],
+ "layer_256": [
+ "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44"
+ ],
+ "layer_b3": [
+ "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481"
+ ]
+ }
+ },
+ "info.art.qwen2": {
+ "bagel-mot": {
+ "repo": "ByteDance-Seed/BAGEL-7B-MoT",
+ "pkg": {
+ "0": {
+ "Bagel": "app"
+ }
+ }
+ }
+ },
+ "info.vae.tae": {
+ "stable-diffusion-3": {
+ "repo": "madebyollin/taesd3",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"
+ ]
+ },
+ "stable-diffusion-xl-1": {
+ "repo": "madebyollin/taesdxl",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "madebyollin/taesd",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"
+ ]
+ },
+ "flux1-dev": {
+ "repo": "madebyollin/taef1",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"
+ ]
+ }
+ },
+ "info.vae.kl": {
+ "qwen-image": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLQwenImage"
+ }
+ },
+ "file_256": [
+ "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344"
+ ],
+ "layer_256": [
+ "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc"
+ ],
+ "layer_b3": [
+ "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b"
+ ]
+ },
+ "ltx-video": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLLTXVideo"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "allegro": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLAllegro"
+ }
+ },
+ "file_256": [
+ "47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"
+ ],
+ "layer_256": [
+ "bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"
+ ],
+ "layer_b3": [
+ "93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"
+ ]
+ },
+ "cosmos-1-diffusion-video2world": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLCosmos"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "easyanimatev5-zh": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLMagvit"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "hunyuanvideo-i2v": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLHunyuanVideo"
+ }
+ },
+ "file_256": [
+ "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818",
+ "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
+ "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
+ "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0"
+ ]
+ },
+ "mochi-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLMochi"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "audioldm-s-v2": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"
+ ],
+ "layer_256": [
+ "54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"
+ ],
+ "layer_b3": [
+ "00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"
+ ]
+ },
+ "stable-video-diffusion-img2vid-xt": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLTemporalDecoder"
+ }
+ }
+ },
+ "stable-diffusion-xl-1": {
+ "repo": "madebyollin/sdxl-vae-fp16-fix",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
+ "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c",
+ "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646",
+ "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48",
+ "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68",
+ "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341",
+ "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4",
+ "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88"
+ ],
+ "layer_256": [
+ "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
+ "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
+ ],
+ "layer_b3": [
+ "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74",
+ "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0"
+ ]
+ },
+ "stable-diffusion-xl-1*": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
+ "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a",
+ "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1",
+ "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff"
+ ],
+ "layer_256": [
+ "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
+ "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
+ ],
+ "layer_b3": [
+ "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74"
+ ]
+ },
+ "shuttle-jaguar": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904"
+ ],
+ "layer_256": [
+ "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b"
+ ],
+ "layer_b3": [
+ "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
+ "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0"
+ ]
+ },
+ "flux1-dev": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38",
+ "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3",
+ "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94",
+ "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc"
+ ],
+ "layer_256": [
+ "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
+ "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216",
+ "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004",
+ "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80"
+ ],
+ "layer_b3": [
+ "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17",
+ "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903"
+ ]
+ },
+ "musicldm": {
+ "file_256": [
+ "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4"
+ ],
+ "layer_256": [
+ "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f"
+ ],
+ "layer_b3": [
+ "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520",
+ "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a",
+ "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46",
+ "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab",
+ "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75",
+ "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815",
+ "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c",
+ "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c"
+ ],
+ "layer_256": [
+ "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd",
+ "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3"
+ ],
+ "layer_b3": [
+ "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e",
+ "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833"
+ ]
+ }
+ },
+ "info.vae.wan": {
+ "wan2-i2v-480p": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLWan",
+ "precision": "ops.precision.float.F32"
+ }
+ },
+ "file_256": [
+ "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793",
+ "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"
+ ],
+ "layer_256": [
+ "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
+ "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e"
+ ],
+ "layer_b3": [
+ "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010",
+ "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433"
+ ]
+ },
+ "skyreels-v2-t2v-720p": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "skyreels-v2-i2v-720p": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ }
+ },
+ "info.vae.cogvideox": {
+ "cogvideox-i2v": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLCogVideoX"
+ }
+ },
+ "file_256": [
+ "a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"
+ ],
+ "layer_256": [
+ "43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"
+ ],
+ "layer_b3": [
+ "246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"
+ ]
+ },
+ "cogvideox-fun-v-pose": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "consisid": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ }
+ },
+ "info.vae.dc": {
+ "sana-1024px-bf16": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderDC"
+ }
+ },
+ "file_256": [
+ "15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"
+ ],
+ "layer_256": [
+ "abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"
+ ],
+ "layer_b3": [
+ "cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"
+ ]
+ }
+ },
+ "info.vae.oobleck": {
+ "stable-audio-open-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderOobleck"
+ }
+ }
+ }
+ },
+ "info.vae.eq": {
+ "stable-diffusion-xl-1": {
+ "repo": "KBlueLeaf/EQ-SDXL-VAE",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ }
+ }
+ },
+ "info.vae.ms-lc-eq": {
+ "stable-diffusion-xl-1": {
+ "repo": "Anzhc/MS-LC-EQ-D-VR_VAE",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/mir/automata.py b/mir/automata.py
index 595125c..227f4ab 100644
--- a/mir/automata.py
+++ b/mir/automata.py
@@ -18,17 +18,17 @@
from mir.config.conversion import slice_number
from mir.indexers import diffusers_index, transformers_index
from mir.maid import MIRDatabase
-from mir.spec.mir import mir_entry
-from mir.tag import make_mir_tag, make_scheduler_tag, tag_base_model, tag_pipe
+from mir.spec import mir_entry
+from mir.tag import tag_model_from_repo, tag_scheduler, tag_base_model, tag_pipe
-sd1_series, sd1_comp = make_mir_tag("stable-diffusion-v1-5/stable-diffusion-v1-5")
-sdxl_series, sdxl_comp = make_mir_tag("stabilityai/stable-diffusion-xl-base-1.0")
-dev_series, dev_comp = make_mir_tag("black-forest-labs/FLUX.1-dev")
-schnell_series, schnell_comp = make_mir_tag("black-forest-labs/FLUX.1-schnell")
-ssd_series, ssd_comp = make_mir_tag("segmind/SSD-1B")
-vega_series, vega_comp = make_mir_tag("segmind/Segmind-Vega")
-sd3_series, sd3_comp = make_mir_tag("stable-diffusion-3.5-medium") #
+sd1_series, sd1_comp = tag_model_from_repo("stable-diffusion-v1-5/stable-diffusion-v1-5")
+sdxl_series, sdxl_comp = tag_model_from_repo("stabilityai/stable-diffusion-xl-base-1.0")
+dev_series, dev_comp = tag_model_from_repo("black-forest-labs/FLUX.1-dev")
+schnell_series, schnell_comp = tag_model_from_repo("black-forest-labs/FLUX.1-schnell")
+ssd_series, ssd_comp = tag_model_from_repo("segmind/SSD-1B")
+vega_series, vega_comp = tag_model_from_repo("segmind/Segmind-Vega")
+sd3_series, sd3_comp = tag_model_from_repo("stable-diffusion-3.5-medium") #
# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
# from diffusers.models.attention_processor import AttentionProcessor
@@ -214,7 +214,7 @@ def add_mir_schedulers(mir_db: MIRDatabase):
for class_name in _import_structure["schedulers"]:
if class_name != "SchedulerMixin":
- series_name, comp_name = make_scheduler_tag(class_name)
+ series_name, comp_name = tag_scheduler(class_name)
class_obj = import_module("diffusers.schedulers")
class_path = getattr(class_obj, class_name).__module__
mir_db.add(
@@ -233,7 +233,7 @@ def add_mir_schedulers(mir_db: MIRDatabase):
)
class_name = "KarrasDiffusionSchedulers"
- series_name, comp_name = make_scheduler_tag(class_name)
+ series_name, comp_name = tag_scheduler(class_name)
class_obj = import_module("diffusers.schedulers.scheduling_utils")
class_path = getattr(class_obj, class_name).__module__
mir_db.add(
@@ -528,35 +528,6 @@ def mir_update(mir_db: MIRDatabase, task_list: list = None, pipe_list: list = No
],
},
),
- (
- "Kwai-Kolors/Kolors-diffusers",
- "KolorsPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 5.0,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024,
- },
- },
- 1: {"diffusers": "DiffusionPipeline"},
- },
- "file_256": [
- "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c", # fp16,
- ],
- "layer_b3": [
- "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414", # fp16
- ],
- "layer_256": [
- "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd", # fp16
- ],
- "identifiers": [".DenseReluDense.wi.weight", "encoder_hid_proj.weight"],
- },
- ),
(
"stabilityai/stable-cascade-prior",
"StableCascadePriorPipeline",
@@ -981,20 +952,6 @@ def mir_update(mir_db: MIRDatabase, task_list: list = None, pipe_list: list = No
"layer_256": ["ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"],
},
),
- (
- "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
- "HunyuanDiTPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- }
- },
- "file_256": ["7d31ac8fa389ff39dd0a81430010e52c43b59f15adc00c83625a47881e16830e"],
- "layer_b3": ["bccd37ecc9f85d132b46d0bb67b4facb49fc6c091428a4feba9ab9a93140f5fe"],
- "layer_256": ["ed25d241d58ca298d28abd5919e70341ad194e77dce4859436b52ea4d8fcb616"],
- },
- ),
(
"Alpha-VLLM/Lumina-Image-2.0",
"Lumina2Pipeline",
@@ -1098,6 +1055,49 @@ def mir_update(mir_db: MIRDatabase, task_list: list = None, pipe_list: list = No
}
},
),
+ (
+ "Kwai-Kolors/Kolors-diffusers",
+ "KolorsPipeline",
+ {
+ "pkg": {
+ 0: {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 5.0,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024,
+ },
+ },
+ 1: {"diffusers": "DiffusionPipeline"},
+ },
+ "file_256": [
+ "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c", # fp16,
+ ],
+ "layer_b3": [
+ "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414", # fp16
+ ],
+ "layer_256": [
+ "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd", # fp16
+ ],
+ "identifiers": [".DenseReluDense.wi.weight", "encoder_hid_proj.weight"],
+ },
+ ),
+ (
+ "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
+ "HunyuanDiTPipeline",
+ {
+ "pkg": {
+ 0: {
+ "precision": "ops.precision.float.F16",
+ }
+ },
+ "file_256": ["7d31ac8fa389ff39dd0a81430010e52c43b59f15adc00c83625a47881e16830e"],
+ "layer_b3": ["bccd37ecc9f85d132b46d0bb67b4facb49fc6c091428a4feba9ab9a93140f5fe"],
+ "layer_256": ["ed25d241d58ca298d28abd5919e70341ad194e77dce4859436b52ea4d8fcb616"],
+ },
+ ),
]
transformers_addons = [
@@ -1458,7 +1458,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
"""Create MIR entries missing from the database"""
repo = "microsoft/speecht5_hifigan"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -1476,14 +1476,14 @@ def add_mir_diffusion(mir_db: MIRDatabase):
],
)
)
- series, comp = make_mir_tag("lodestones/Chroma")
+ series, comp = tag_model_from_repo("lodestones/Chroma")
repo = "lodestones/Chroma1-HD"
mir_db.add(
mir_entry(
domain="info",
arch="dit",
series=series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
"0": {
@@ -1514,7 +1514,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
"0": {
@@ -1572,7 +1572,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="unet",
series=sdxl_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f", # v4
@@ -1600,7 +1600,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="unet",
series=sdxl_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331", # v2
@@ -1631,7 +1631,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="unet",
series=sdxl_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8", # 2.5 diffusers
@@ -1677,7 +1677,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="unet",
series=sdxl_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce", # modelspec sai
@@ -1703,7 +1703,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="unet",
series=sdxl_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd", # modelspec sai
@@ -1734,7 +1734,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=schnell_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
2: {
@@ -1762,7 +1762,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=schnell_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
2: {
@@ -1794,7 +1794,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={0: {"generation": {"num_inference_steps": 16, "guidance_scale": 7.5, "width": 768, "height": 1024}}},
file_256=[
@@ -1815,7 +1815,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=schnell_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
2: {
@@ -1842,7 +1842,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={0: {"generation": {"num_inference_steps": 28}}},
file_256=[
@@ -1863,7 +1863,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
)
@@ -1874,7 +1874,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
)
@@ -1885,7 +1885,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
)
@@ -1896,7 +1896,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=["4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"],
layer_b3=["c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"],
@@ -1909,7 +1909,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615", # modelspec sai
@@ -1929,7 +1929,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=dev_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f", # modelspec sai
@@ -1949,7 +1949,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
domain="info",
arch="dit",
series=sd3_series,
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
pkg={
0: {
@@ -1978,7 +1978,7 @@ def add_mir_diffusion(mir_db: MIRDatabase):
),
)
repo = "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2001,8 +2001,8 @@ def add_mir_diffusion(mir_db: MIRDatabase):
mir_entry(
domain="info",
arch="dit",
- series=make_mir_tag("Alpha-VLLM/Lumina-Image-2.0")[0],
- comp=make_mir_tag(repo)[0],
+ series=tag_model_from_repo("Alpha-VLLM/Lumina-Image-2.0")[0],
+ comp=tag_model_from_repo(repo)[0],
repo=repo,
file_256=[
"dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8", # 2b 22100 ema unified fp32
@@ -2053,11 +2053,11 @@ def add_mir_diffusion(mir_db: MIRDatabase):
def add_mir_llm(mir_db: MIRDatabase):
base_arch, base_series, base_comp = tag_base_model(repo_path="facebook/chameleon-7b", class_name="ChameleonModel")
repo = "Alpha-VLLM/Lumina-mGPT-7B-1024"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
- arch=base_arch,
+ arch="art",
series=base_series,
comp=series,
repo=repo,
@@ -2080,7 +2080,7 @@ def add_mir_llm(mir_db: MIRDatabase):
),
)
repo = "openai/clip-vit-large-patch14"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2149,7 +2149,7 @@ def add_mir_llm(mir_db: MIRDatabase):
)
)
repo = "laion/CLIP-ViT-g-14-laion2B-s12B-b42K"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2192,7 +2192,7 @@ def add_mir_llm(mir_db: MIRDatabase):
)
)
repo = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2221,7 +2221,7 @@ def add_mir_llm(mir_db: MIRDatabase):
)
)
repo = "zai-org/chatglm3-6b" # formerly THUDM
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2246,11 +2246,11 @@ def add_mir_llm(mir_db: MIRDatabase):
)
base_arch, base_series, base_comp = tag_base_model(repo_path="Qwen/Qwen2-7B-beta", class_name="Qwen2Model")
repo = "ByteDance-Seed/BAGEL-7B-MoT"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
- arch=base_arch,
+ arch="art",
series=base_series,
comp=series,
repo=repo,
@@ -2262,7 +2262,7 @@ def add_mir_llm(mir_db: MIRDatabase):
def add_mir_audio(mir_db: MIRDatabase):
"""Create MIR audio modality entries"""
repo = "facebook/audiogen-medium"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2283,7 +2283,7 @@ def add_mir_audio(mir_db: MIRDatabase):
)
)
repo = "parler-tts/parler-tts-tiny-v1"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2300,7 +2300,7 @@ def add_mir_audio(mir_db: MIRDatabase):
)
)
repo = "Zuellni/snac-24khz-ST"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
(
mir_db.add(
mir_entry(
@@ -2324,7 +2324,7 @@ def add_mir_audio(mir_db: MIRDatabase):
),
)
repo = "parler-tts/parler-tts-large-v1"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2341,7 +2341,7 @@ def add_mir_audio(mir_db: MIRDatabase):
)
)
repo = "hexgrad/Kokoro-82M"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2373,7 +2373,7 @@ def add_mir_audio(mir_db: MIRDatabase):
)
)
repo = "freddyaboulton/silero-vad"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2405,7 +2405,7 @@ def add_mir_audio(mir_db: MIRDatabase):
),
)
repo = "facebook/wav2vec2-conformer-rope-large-960h-ft"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2424,7 +2424,7 @@ def add_mir_audio(mir_db: MIRDatabase):
),
)
repo = "canopylabs/orpheus-3b-0.1-ft"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2445,7 +2445,7 @@ def add_mir_audio(mir_db: MIRDatabase):
)
)
repo = "OuteAI/OuteTTS-0.3-1B"
- series, comp = make_mir_tag(repo)
+ series, comp = tag_model_from_repo(repo)
mir_db.add(
mir_entry(
domain="info",
@@ -2923,7 +2923,7 @@ def add_mir_vae(mir_db: MIRDatabase):
file_256=["927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"],
)
)
- series, comp = make_mir_tag("Qwen/Qwen-Image")
+ series, comp = tag_model_from_repo("Qwen/Qwen-Image")
mir_db.add(
mir_entry(
domain="info",
@@ -2945,9 +2945,9 @@ def add_mir_vae(mir_db: MIRDatabase):
],
)
)
- series, comp = make_mir_tag("Wan-AI/Wan2.1-I2V-14B-480P-Diffusers")
- sr_series_text2v, _ = make_mir_tag("Skywork/SkyReels-V2-T2V-14B-720P-Diffusers")
- sr_series_image2v, _ = make_mir_tag("Skywork/SkyReels-V2-I2V-14B-720P-Diffusers")
+ series, comp = tag_model_from_repo("Wan-AI/Wan2.1-I2V-14B-480P-Diffusers")
+ sr_series_text2v, _ = tag_model_from_repo("Skywork/SkyReels-V2-T2V-14B-720P-Diffusers")
+ sr_series_image2v, _ = tag_model_from_repo("Skywork/SkyReels-V2-I2V-14B-720P-Diffusers")
mir_db.add(
mir_entry(
domain="info",
@@ -2999,7 +2999,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("Lightricks/LTX-Video")
+ series, comp = tag_model_from_repo("Lightricks/LTX-Video")
mir_db.add(
mir_entry(
domain="info",
@@ -3015,7 +3015,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("rhymes-ai/Allegro")
+ series, comp = tag_model_from_repo("rhymes-ai/Allegro")
mir_db.add(
mir_entry(
domain="info",
@@ -3031,9 +3031,9 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("zai-org/CogVideoX-5b-I2V")
- series_fun, _ = make_mir_tag("alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose")
- series_wish, _ = make_mir_tag("BestWishYsh/ConsisID-preview")
+ series, comp = tag_model_from_repo("zai-org/CogVideoX-5b-I2V")
+ series_fun, _ = tag_model_from_repo("alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose")
+ series_wish, _ = tag_model_from_repo("BestWishYsh/ConsisID-preview")
mir_db.add(
mir_entry(
domain="info",
@@ -3073,7 +3073,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("nvidia/Cosmos-1.0-Diffusion-7B-Video2World")
+ series, comp = tag_model_from_repo("nvidia/Cosmos-1.0-Diffusion-7B-Video2World")
mir_db.add(
mir_entry(
domain="info",
@@ -3089,7 +3089,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers")
+ series, comp = tag_model_from_repo("alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers")
mir_db.add(
mir_entry(
domain="info",
@@ -3105,7 +3105,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("hunyuanvideo-community/HunyuanVideo-I2V")
+ series, comp = tag_model_from_repo("hunyuanvideo-community/HunyuanVideo-I2V")
mir_db.add(
mir_entry(
domain="info",
@@ -3126,7 +3126,7 @@ def add_mir_vae(mir_db: MIRDatabase):
# layer_256=[],
)
)
- series, comp = make_mir_tag("genmo/mochi-1-preview")
+ series, comp = tag_model_from_repo("genmo/mochi-1-preview")
mir_db.add(
mir_entry(
domain="info",
@@ -3142,7 +3142,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=[],
)
)
- series, comp = make_mir_tag("rhymes-ai/Allegro")
+ series, comp = tag_model_from_repo("rhymes-ai/Allegro")
mir_db.add(
mir_entry(
domain="info",
@@ -3160,7 +3160,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=["bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"],
)
)
- series, comp = make_mir_tag("cvssp/audioldm-s-full-v2")
+ series, comp = tag_model_from_repo("cvssp/audioldm-s-full-v2")
mir_db.add(
mir_entry(
domain="info",
@@ -3179,7 +3179,7 @@ def add_mir_vae(mir_db: MIRDatabase):
)
)
- series, comp = make_mir_tag("Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers")
+ series, comp = tag_model_from_repo("Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers")
mir_db.add(
mir_entry(
domain="info",
@@ -3195,7 +3195,7 @@ def add_mir_vae(mir_db: MIRDatabase):
layer_256=["abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"],
)
)
- series, comp = make_mir_tag("stabilityai/stable-audio-open-1.0")
+ series, comp = tag_model_from_repo("stabilityai/stable-audio-open-1.0")
mir_db.add(
mir_entry(
domain="info",
@@ -3211,7 +3211,7 @@ def add_mir_vae(mir_db: MIRDatabase):
# layer_256=[],
)
)
- series, comp = make_mir_tag("stable-video-diffusion-img2vid-xt")
+ series, comp = tag_model_from_repo("stable-video-diffusion-img2vid-xt")
mir_db.add(
mir_entry(
domain="info",
@@ -3295,7 +3295,7 @@ def add_mir_vae(mir_db: MIRDatabase):
domain="info",
arch="vae",
series="kl",
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
# no repo here, may conflict
pkg={
0: {"diffusers": "AutoencoderKL"},
@@ -3373,7 +3373,7 @@ def add_mir_vae(mir_db: MIRDatabase):
domain="info",
arch="vae",
series="kl",
- comp=make_mir_tag(repo)[0],
+ comp=tag_model_from_repo(repo)[0],
# no repo here, may conflict
file_256=[
"16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4",
diff --git a/mir/config/constants.py b/mir/config/constants.py
index 07dd812..5736e52 100644
--- a/mir/config/constants.py
+++ b/mir/config/constants.py
@@ -1,65 +1,133 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import List, Optional, Union
-from mir.config.json_io import read_json_file
import os
+from dataclasses import dataclass, field
+from typing import Callable, List
-from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES
-from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
import transformers
+from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
+from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES
+
+from mir.config.json_io import read_json_file
+
def mapped_cls(model_identifier: str):
- """Get model class from identifier without calling huggingface_hub.
-
+ """Get model class from identifier without calling huggingface_hub.\n
:param model_identifier: Model identifier like "bert-base-uncased" or "gpt2"
:return: Model class (e.g., BertModel, GPT2Model)
"""
- # Extract code name from model identifier (e.g., "bert-base-uncased" -> "bert")
- # Handle various formats: "bert-base-uncased", "gpt2", "microsoft/DialoGPT-medium"
code_name = model_identifier.split("/")[-1].split("-")[0].lower()
-
- # Method 1: Direct lookup via MODEL_MAPPING_NAMES (simplest)
+
model_class_name = MODEL_MAPPING_NAMES.get(code_name, None)
-
- # Method 2: Via config class lookup (matches _get_model_class behavior more closely)
config_class_name = CONFIG_MAPPING_NAMES.get(code_name)
if config_class_name:
config_class = getattr(transformers, config_class_name, None)
if config_class:
- # Look up in MODEL_MAPPING using config class
model_class = MODEL_MAPPING.get(config_class, None)
if model_class:
if isinstance(model_class, tuple):
model_class = model_class[0]
return model_class
-
- # Fallback: try with normalized code name (handle underscores/dashes)
+
normalized = code_name.replace("_", "-")
if normalized != code_name:
- print(f"normalized: {normalized}")
- model_class_name = MODEL_MAPPING_NAMES.get(normalized, None)
- if model_class_name:
+ if model_class_name := MODEL_MAPPING_NAMES.get(normalized, None):
+ if isinstance(model_class_name, tuple):
+ model_class_name = model_class_name[0]
return getattr(transformers, model_class_name, None)
- if model_class_name:
- if isinstance(model_class_name, tuple):
- model_class_name = model_class_name[0]
- return getattr(transformers, model_class_name, None)
return None
+def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable:
+ """Convert two strings into a callable function or property\n
+ :param module: The name of the module to import
+ :param library_path: Base package for the module
+ :return: The callable attribute or property
+ """
+ from importlib import import_module
+
+ module = module_name.strip()
+ library = pkg_name_or_abs_path.strip()
+ base_library = import_module(library, module)
+ module = getattr(base_library, module)
+ return module
+
+
+def extract_init_params(module: Callable | str, package_name: str | None = None) -> dict[str, list[str]]:
+ """Pick apart a Diffusers or Transformers pipeline class and find its constituent parts (formerly root_class)\n
+ :param module: Origin pipeline as a class or as a string
+ :param library: name of a library to import the class from, only if a string is provided
+ :return: Dictionary of sub-classes from the `module`"""
+
+ import inspect
+
+ if package_name and isinstance(module, str):
+ module_obj: Callable = import_submodules(module, package_name)
+ else:
+ assert isinstance(module, Callable)
+ module_obj = module
+ signature = inspect.signature(module_obj.__init__)
+ class_names = {}
+ for folder, param in signature.parameters.items():
+ if folder not in ["self", "kwargs", "use_cache"]:
+ sub_module = str(param.annotation).split("'")
+ if len(sub_module) > 1 and sub_module[1] not in [
+ "bool",
+ "int",
+ "float",
+ "complex",
+ "str",
+ "list",
+ "tuple",
+ "dict",
+ "set",
+ "inspect",
+ "_empty",
+ ]:
+ class_names.setdefault(folder, sub_module[1].split("."))
+ return class_names
+
+
+@dataclass
+class ClassMapEntry:
+ """Represents a structured entry of the name of the class and its associated attributes."""
+
+ name: str
+ model_name: str
+ model: Callable
+ config: Callable
+ config_params: dict[str, list[str]] = field(init=False, default_factory=lambda: {})
+ model_params: dict[str, list[str]] | None = None
+
+ def __post_init__(self):
+ if self.model:
+ self.model_params = extract_init_params(self.model)
+ if self.config:
+ self.config_params = extract_init_params(self.config)
+
+
+@dataclass
+class DocStringEntry:
+ """Represents a structured entry of package name, file name, and docstring."""
+
+ package_name: str
+ file_name: str
+ doc_string: str
+
+
class DocParseData:
pipe_class: str
pipe_repo: str
- staged_class: Optional[str] = None
- staged_repo: Optional[str] = None
+ staged_class: str | None = None
+ staged_repo: str | None = None
- def __init__(self, pipe_class, pipe_repo, staged_class=None, staged_repo=None):
- self.pipe_class: str = pipe_class
- self.pipe_repo: str = pipe_repo
- self.staged_class: str = staged_class
- self.staged_repo: str = staged_repo
+ def __init__(self, pipe_class: str, pipe_repo: str, staged_class: str | None = None, staged_repo: str | None = None):
+ self.pipe_class = pipe_class
+ self.pipe_repo = pipe_repo
+ self.staged_class = staged_class
+ self.staged_repo = staged_repo
class DocStringParserConstants:
@@ -100,7 +168,6 @@ class DocStringParserConstants:
root_path = os.path.join(os.getcwd(), "mir")
versions = read_json_file(os.path.join(root_path, "spec", "versions.json"))
template = read_json_file(os.path.join(root_path, "spec", "template.json"))
-print(root_path)
MIR_PATH_NAMED = os.path.join(root_path, "mir.json")
BREAKING_SUFFIX = r".*(?:-)(prior)$|.*(?:-)(diffusers)$|.*[_-](\d{3,4}px|-T2V$|-I2V$)"
diff --git a/mir/config/conversion.py b/mir/config/conversion.py
index ab5d98c..beaee14 100644
--- a/mir/config/conversion.py
+++ b/mir/config/conversion.py
@@ -1,108 +1,71 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import Callable, Optional, Union, Type, List, Iterator, Tuple, Dict
-from mir.config.console import dbuq, nfo
+from typing import Callable, Optional, Union, Type, List, Generator, Dict
-def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Optional[Callable]:
- """Convert two strings into a callable function or property\n
- :param module: The name of the module to import
- :param library_path: Base package for the module
- :return: The callable attribute or property
- """
- from importlib import import_module
-
- module = module_name.strip()
- library = pkg_name_or_abs_path.strip()
- base_library = import_module(library, module)
- try:
- module = getattr(base_library, module)
- return module
- except AttributeError: # as error_log:
- # dbuq(error_log)
- return base_library
-
-
-def code_name_to_class_name(
- code_name: Optional[Union[str, Type]] = None,
- pkg_name: Optional[str] = "transformers",
-) -> Union[List[str], str]:
- """Fetch class names from code names from Diffusers or Transformers\n
- :param class_name: To return only one class, defaults to None
- :param pkg_name: optional field for library, defaults to "transformers"
- :return: A list of all code names, or the one corresponding to the provided class"""
- from mir.config.constants import package_map
-
- pkg_name = pkg_name.lower()
- MAPPING_NAMES = import_submodules(*package_map[pkg_name])
- if code_name:
- return MAPPING_NAMES.get(code_name)
- return list(MAPPING_NAMES.keys())
-
-
-def pkg_path_to_docstring(pkg_name: str, folder_path: bool) -> Iterator[Tuple[str, str, str]]:
- """Processes package folder paths to yield example doc strings if available.\n
- :param pkg_name: The name of the package under diffusers.pipelines.
- :param file_specific: A flag indicating whether processing is specific to certain files.
- :yield: A tuple containing (pkg_name, file_name, EXAMPLE_DOC_STRING) if found.
+from mir.config.console import dbuq, nfo
+from mir.config.constants import DocStringEntry, ClassMapEntry, import_submodules
+
+
+def retrieve_diffusers_docstrings(
+ package_name: str,
+ file_names: list[str],
+) -> Generator[DocStringEntry]:
+ """Yield (pkg, file, EXAMPLE_DOC_STRING) from a folder or a single file.\n
+ :param pkg_name: Package under ``diffusers.pipelines``.\n
+ :param file_names: A list of related file names.\n
+ :param use_folder: True → treat ``source`` as a folder with ``_import_structure``.\n
+ :return: DocString Entry class.\n
"""
import os
from importlib import import_module
- file_names = list(getattr(folder_path, "_import_structure").keys())
- module_path = os.path.dirname(import_module("diffusers.pipelines").__file__)
+ module_location: str | None = import_module("diffusers.pipelines").__file__
+ module_path = os.path.dirname(module_location)
+
for file_name in file_names:
+ assert isinstance(file_name, str)
if file_name == "pipeline_stable_diffusion_xl_inpaint":
continue
- try:
- pkg_path = f"diffusers.pipelines.{str(pkg_name)}.{file_name}"
- dbuq(pkg_path)
- path_exists = os.path.exists(os.path.join(module_path, pkg_name, file_name + ".py"))
- if path_exists:
- print(f"file_name, pkg_path): {file_name, pkg_path}")
- pipe_file = import_submodules(file_name, pkg_path)
- except ModuleNotFoundError:
- if pkg_name != "skyreels_v2":
- nfo(f"Module Not Found for {pkg_name}")
- pipe_file = None
-
- try:
- if pipe_file and hasattr(pipe_file, "EXAMPLE_DOC_STRING"):
- yield (pkg_name, file_name, pipe_file.EXAMPLE_DOC_STRING)
- else:
- if path_exists:
- pipe_file = import_module(pkg_path)
- except (ModuleNotFoundError, AttributeError):
- if pkg_name != "skyreels_v2":
- nfo(f"Doc String Not Found for {pipe_file} {pkg_name}")
-
-
-def file_name_to_docstring(pkg_name: str, file_specific: bool) -> Iterator[Tuple[str, str, str]]:
- """Processes package using file name to yield example doc strings if available.\n
- :param pkg_name: The name of the package under diffusers.pipelines.
- :param file_specific: A flag indicating whether processing is specific to certain files.
- :yield: A tuple containing (pkg_name, file_name, EXAMPLE_DOC_STRING) if found.
- """
- from importlib import import_module
- file_name = f"pipeline_{file_specific}"
- try:
- pkg_path = f"diffusers.pipelines.{str(pkg_name)}"
- pipe_file = import_submodules(file_name, pkg_path)
- except ModuleNotFoundError:
- if pkg_name != "skyreels_v2":
- nfo(f"Module Not Found for {pkg_name}")
- pipe_file = None
- try:
- if pipe_file and hasattr(pipe_file, "EXAMPLE_DOC_STRING"):
- yield (pkg_name, file_name, pipe_file.EXAMPLE_DOC_STRING)
+ pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
+ dbuq(pkg_path)
+
+ if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
+ pipe_file = import_submodules(file_name, pkg_path) or import_module(pkg_path) or nfo(f"Failed to import {pkg_path}")
+ if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
+ yield DocStringEntry(package_name=package_name, file_name=file_name, doc_string=doc_string)
+ else:
+ nfo(f"Doc string attribute missing for {package_name}/{file_name}")
else:
- pipe_file = import_module(pkg_path)
+ nfo(f"Path not found for {package_name}/{file_name}")
+
+ return
+
- except AttributeError:
- if pkg_name != "skyreels_v2":
- nfo(f"Doc String Not Found for {pipe_file} {pkg_name}")
+def get_repo_from_class_map(class_map: ClassMapEntry) -> str | None:
+ """The name of the repository that is associated with a transformers configuration class
+ :param class_map: Transformers class information extracted from dependency
+ :returns: A string matching the repo path for the class"""
+
+ import re
+
+ doc_attempt = []
+ if hasattr(class_map.config, "forward"):
+ doc_attempt = [getattr(class_map.config, "forward")]
+ doc_attempt.append(class_map.config)
+ for pattern in doc_attempt:
+ doc_string = pattern.__doc__
+ matches = re.findall(r"\[([^\]]+)\]", doc_string)
+ if matches:
+ try:
+ repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
+ except StopIteration as error_log:
+ nfo(f"ERROR >>{matches} : LOG >> {error_log}")
+ continue
+ return repo_path
+ return None
def class_to_mir_tag(mir_db: Dict[str, str], code_name: str) -> Optional[str]:
diff --git a/mir/doc_parser.py b/mir/doc_parser.py
index 0455b08..9bf6181 100644
--- a/mir/doc_parser.py
+++ b/mir/doc_parser.py
@@ -8,13 +8,6 @@
from mir.config.constants import DocParseData, DocStringParserConstants
-def parse_docs(doc_string: str) -> Tuple[Optional[str], Optional[str], Optional[str], Optional[str]]:
- parser = DocStringParser(doc_string=doc_string)
- result = parser.parse()
- if result is not None:
- return result
-
-
class DocStringValidator:
"""Handles validation of docstring data and extracted values."""
@@ -62,7 +55,7 @@ class DocStringParser(BaseModel):
def normalize_doc(cls, docs: str) -> str:
return DocStringValidator.normalize_doc_string(docs)
- def doc_match(self, prefix_set: List[str] = None):
+ def doc_match(self, prefix_set: List[str] | None = None):
if prefix_set is None:
prefix_set = DocStringParserConstants.pipe_prefixes
candidate = None
@@ -160,3 +153,8 @@ def _resolve_variable(self, reference: str, prior_text: str) -> Optional[str]:
nfo(f"Warning: {search} not found in docstring.")
return None
+
+
+def parse_docs(doc_string: str) -> DocParseData:
+ parser = DocStringParser(doc_string=doc_string)
+ return parser.parse()
diff --git a/mir/indexers.py b/mir/indexers.py
index d173085..7d78c6a 100644
--- a/mir/indexers.py
+++ b/mir/indexers.py
@@ -5,12 +5,13 @@
# pylint:disable=no-name-in-module
import sys
-from typing import Any, Callable, Dict, List, Optional
-from mir.doc_parser import parse_docs
-from mir.tag import make_mir_tag
-from mir.inspect.classes import resolve_code_names, extract_init_params
+from typing import Any, Callable
+
from mir.config.console import nfo
-from mir.config.conversion import import_submodules
+from mir.config.constants import ClassMapEntry, extract_init_params
+from mir.config.conversion import get_repo_from_class_map, import_submodules
+from mir.doc_parser import parse_docs
+from mir.tag import mir_prefix_from_forward_pass, mir_tag_from_config, tag_model_from_repo
if "pytest" in sys.modules:
import diffusers # noqa # pyright:ignore[reportMissingImports] # pylint:disable=unused-import
@@ -20,63 +21,20 @@ def check_migrations(repo_path: str):
"""Replaces old organization names in repository paths with new ones.\n
:param repo_path: Original repository path containing old organization names
:return: Updated repository path with new organization names"""
- org_migration: dict[str, str] = {
- "/helium-2b": "/helium-1-2b",
- "allenai/Olmo2-7B-1124-hf": "allenai/Olmo-2-1124-7B",
- "apple/mobilevitv2-1.0": "apple/mobilevitv2-1.0-imagenet1k-256",
- "caidas/swin2SR-classical-sr-x2-64": "caidas/swin2SR-classical-sr-x2-64",
- "facebook/hiera-base-224": "facebook/hiera-base-224-hf",
- "facebook/sam_hq-vit-huge": "syscv-community/sam-hq-vit-huge",
- "facebook/vit_msn_base": "facebook/vit-msn-base",
- "facebook/wav2vec2-bert-rel-pos-large": "facebook/w2v-bert-2.0",
- "google/gemma-3-4b": "google/gemma-3-4b-it",
- "google/gemma2-7b": "google/gemma-2-9b",
- "google/gemma3_text-7b": "google/gemma-3-12b-it",
- "IDEA-Research/dab_detr-base": "IDEA-Research/dab-detr-resnet-50",
- "LGAI-EXAONE/EXAONE-4.0-Instruct": "LGAI-EXAONE/EXAONE-4.0-32B",
- "meta/chameleon-7B'": "facebook/chameleon-7b",
- "mixtralai/Mixtral-8x7B": "mistralai/Mixtral-8x7B-v0.1",
- "paligemma-hf/paligemma-2b": "google/paligemma2-3b-mix-224",
- "pixtral-hf/pixtral-9b": "mistralai/Pixtral-12B-Base-2409",
- "Qwen/Qwen2-7B-beta": "Qwen/Qwen2-7B",
- "Qwen/Qwen3-15B-A2B": "Qwen/Qwen3-30B-A3B",
- "s-JoL/Open-Llama-V1": "openlm-research/open_llama_3b",
- "Salesforce/instruct-blip-flan-t5": "Salesforce/instructblip-flan-t5-xl",
- "state-spaces/mamba2-2.8b": "AntonV/mamba2-2.7b-hf",
- "ibm-fms/FalconH1-9.8b-2.2T-hf": "tiiuae/Falcon-H1-34B-Instruct",
- "nvidia/nemotron-3-8b-base-4k-hf": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
- "THUDM/": "zai-org/",
- "THUDM/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
- "zai-org/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
- }
- for old_name, new_name in org_migration.items():
- if old_name in repo_path:
- repo_path = repo_path.replace(old_name, new_name)
- # print(repo_path)
- return repo_path
-
+ import os
-def flag_config(transformers: bool = False, data: dict = None, **kwargs):
- """Set type of MIR prefix depending on model type\n
- :param transformers: Use transformers data instead of diffusers data, defaults to False
- :raises ValueError: Model type not detected
- :return: MIR prefix based on model configuration"""
from mir.config.json_io import read_json_file
- data = read_json_file("mir/spec/template.json")
-
- if transformers:
- flags = data["arch"]["transformer"] # pylint:disable=unsubscriptable-object
- else:
- flags = data["arch"]["diffuser"] # pylint:disable=unsubscriptable-object
- for mir_prefix, key_match in flags.items():
- if any(kwargs.get(param) for param in key_match):
- return mir_prefix
- return None
- # nfo(f"Unrecognized model type with {kwargs}\n" )
+ root_folder = os.path.dirname(__file__)
+    migration_file = os.path.join(root_folder, "spec", "repo_migrations.json")
+ repo_migrations = read_json_file(migration_file)
+ for old_name, new_name in repo_migrations.items():
+ if old_name in repo_path:
+ repo_path = repo_path.replace(old_name, new_name)
+ return repo_path
-def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Optional[Callable] = None) -> tuple[str, Dict[str, Dict[Any, Any]]]:
+def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
"""Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
:param repo_path (str): Repository path.
:param model_class_obj (str): The model class function
@@ -105,7 +63,7 @@ def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Optional
elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
mir_prefix = "info.controlnet"
else:
- mir_prefix = flag_config(**sub_segments)
+ mir_prefix = mir_prefix_from_forward_pass(**sub_segments)
if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
nfo(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
else:
@@ -115,7 +73,7 @@ def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Optional
repo_path = "stabilityai/stable-diffusion-3.5-medium"
if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
class_name = "HunyuanVideoPipeline"
- mir_series, mir_comp = list(make_mir_tag(repo_path, decoder))
+ mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
mir_series = mir_prefix + "." + mir_series
repo_path = check_migrations(repo_path)
# modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
@@ -127,7 +85,7 @@ def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Optional
return mir_series, {mir_comp: prefixed_data}
-def diffusers_index() -> Dict[str, Dict[str, Dict[str, Any]]]:
+def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
"""Generate diffusion model data for MIR index\n
:return: Dictionary ready to be applied to MIR data fields
"""
@@ -140,45 +98,53 @@ def diffusers_index() -> Dict[str, Dict[str, Dict[str, Any]]]:
"HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
"ChromaPipeline": "lodestones/Chroma",
}
- from mir.inspect.metadata import gather_diffusers_metadata
- extracted_docs = list(gather_diffusers_metadata())
+ from mir.inspect.metadata import find_diffusers_docstrings
+
+ extracted_docstrings = find_diffusers_docstrings()
+ model_info = [
+ extract #
+ for pipeline in extracted_docstrings
+ for extract in pipeline
+ ]
pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
- print(f"extracted_docs: {extracted_docs}")
- for code_name, file_name, docs in extracted_docs:
- parse_result = parse_docs(docs)
- print(f"parse_result: {parse_result}")
- if parse_result:
- pipe_class = parse_result.pipe_class
- pipe_repo = parse_result.pipe_repo
- staged_class = parse_result.staged_class
- staged_repo = parse_result.staged_repo
- for class_name, swap_repo in special_classes.items():
- if pipe_class == class_name:
- pipe_repo = swap_repo
- break
- model_class_obj = import_submodules(pipe_class, f"diffusers.pipelines.{code_name}.{file_name}")
- extract_init_params(model_class_obj)
+
+ for extract in model_info:
+ pipe = parse_docs(extract.doc_string)
+ if not pipe:
+ nfo(f"Doc string not found in '{extract.package_name}' in {extract.file_name}")
+ continue
+ for class_name, swap_repo in special_classes.items():
+ if pipe.pipe_class == class_name:
+ pipe.pipe_repo = swap_repo
+ break
+ model_class_obj = import_submodules(pipe.pipe_class, f"diffusers.pipelines.{extract.package_name}.{extract.file_name}")
+ extract_init_params(model_class_obj)
+ try:
+ series, comp_data = create_pipe_entry(pipe.pipe_repo, pipe.pipe_class)
+ except TypeError:
+ pass # Attempt 1
+ if pipe_data.get(series):
+ if "img2img" in pipe.pipe_class.lower():
+ continue
+ pipe_data.setdefault(series, {}).update(comp_data)
+ special_conditions = special_repos | special_classes
+ if pipe.staged_class or pipe.pipe_repo in list(special_conditions):
+ test = special_conditions.get(pipe.pipe_repo)
+ if test:
+ staged_repo = test
+ pipe.staged_class = pipe.pipe_class
try:
- series, comp_data = create_pipe_entry(pipe_repo, pipe_class)
- except TypeError:
- pass # Attempt 1
- if pipe_data.get(series):
- if "img2img" in pipe_class.lower():
- continue
+ series, comp_data = create_pipe_entry(
+ staged_repo if pipe.staged_repo else pipe.pipe_repo,
+ pipe.staged_class #
+ if pipe.staged_class
+ else pipe.pipe_class,
+ )
+ except TypeError as error_log:
+ nfo(series, comp_data)
+ nfo(error_log)
+ continue # Attempt 2,
pipe_data.setdefault(series, {}).update(comp_data)
- special_conditions = special_repos | special_classes
- if staged_class or pipe_repo in list(special_conditions):
- test = special_conditions.get(pipe_repo)
- if test:
- staged_repo = test
- staged_class = pipe_class
- try:
- series, comp_data = create_pipe_entry(staged_repo if staged_repo else pipe_repo, staged_class if staged_class else pipe_class)
- except TypeError as error_log:
- print(series, comp_data)
- print(error_log)
- continue # Attempt 2,
- pipe_data.setdefault(series, {}).update(comp_data)
return dict(pipe_data)
@@ -186,107 +152,37 @@ def transformers_index():
"""Generate LLM model data for MIR index\n
:return: Dictionary ready to be applied to MIR data fields"""
- import re
+ import os
- import transformers
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
- from mir.inspect.metadata import gather_transformers_metadata
-
- corrections: dict[dict[str, str | dict[str, list[str]]]] = { # models with incorrect repos or config
- "BarkModel": {
- "repo_path": "suno/bark",
- "sub_segments": {"n_head": [""]},
- },
- "GraniteSpeechForConditionalGeneration": {
- "repo_path": "ibm-granite/granite-speech-3.3-8b",
- "sub_segments": {"encoder_layers": [""], "decoder_layers": [""]},
- },
- "GptOssModel": {
- "repo_path": "openai/gpt-oss-120b",
- },
- "GraniteModel": {
- "repo_path": "ibm-granite/granite-3.3-2b-base",
- "sub_segments": {"rope_theta": [""]},
- },
- "DPRQuestionEncoder": {
- "repo_path": "facebook/dpr-question_encoder-single-nq-base",
- "sub_segments": {"local_attention": [""], "classifier_proj_size": [""]},
- },
- "CohereModel": {
- "repo_path": "CohereForAI/c4ai-command-r-v01",
- "sub_segments": {"attn_config": [""], "num_codebooks": [""]},
- },
- "Cohere2Model": {
- "repo_path": "CohereLabs/c4ai-command-r7b-12-2024",
- "sub_segments": {"attn_config": [""], "num_codebooks": [""]},
- },
- "GraniteMoeHybridModel": {
- "repo_path": "ibm-research/PowerMoE-3b",
- },
- "BertForMaskedLM": {
- "repo_path": "google-bert/bert-base-uncased",
- },
- "DistilBertModel": {
- "repo_path": "distilbert-base-uncased",
- },
- "GraniteMoeModel": {
- "repo_path": "ibm-research/PowerMoE-3b",
- },
- "AriaModel": {
- "repo_path": "rhymes-ai/Aria-Chat",
- "sub_segments": {"vision_config": [""], "text_config": [""]},
- },
- "TimmWrapperModel": {
- "repo_path": "timm/resnet18.a1_in1k",
- "sub_segments": {"_resnet_": [""]},
- },
- "FunnelModel": {
- "repo_path": "funnel-transformer/small",
- "sub_segments": {"separate_cls": [""]},
- },
- }
+ from mir.config.json_io import read_json_file
+
+ root_folder = os.path.dirname(__file__)
+    params_file = os.path.join(root_folder, "spec", "missing_params.json")
+ missing_config_params = read_json_file(params_file)
+ from mir.inspect.metadata import map_transformers_classes
mir_data = {}
- # transformers_data = stock_llm_data()
- transformers_data: Dict[Callable, List[str]] = gather_transformers_metadata()
- for model_class_obj, model_data in transformers_data.items():
- class_name = model_class_obj.__name__
- if class_name in list(corrections): # conditional correction from mappings above: `extract_init_params` doesn't return anything in these cases
- repo_path = corrections[class_name]["repo_path"]
- sub_segments = corrections[class_name].get("sub_segments", extract_init_params(model_data["config"][-1], "transformers"))
- else:
- repo_path = ""
- if model_data.get("config"):
- doc_attempt = [getattr(transformers, model_data["config"][-1]), model_class_obj.forward]
- for pattern in doc_attempt:
- doc_string = pattern.__doc__
- matches = re.findall(r"\[([^\]]+)\]", doc_string)
- if matches:
- try:
- repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
- except StopIteration as error_log:
- nfo(f"ERROR >>{matches} : LOG >> {error_log}")
- pass
- break
- sub_segments: Dict[str, List[str]] = extract_init_params(model_data["config"][-1], "transformers")
- if sub_segments and list(sub_segments) != ["kwargs"] and list(sub_segments) != ["use_cache", "kwargs"] and repo_path is not None:
- mir_prefix = flag_config(transformers=True, **sub_segments)
- if mir_prefix is None:
- nfo(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
- continue
- else:
- mir_prefix = "info." + mir_prefix
- code_name = resolve_code_names(class_name)
- if code_name != "funnel":
- mir_suffix, mir_comp = list(make_mir_tag(repo_path))
- else:
- mir_suffix, mir_comp = ["funnel", "*"]
- mir_series = mir_prefix + "." + mir_suffix
+ transformers_data: list[ClassMapEntry] = map_transformers_classes()
+ for entry in transformers_data:
+        nfo(entry)
+ repo_path = get_repo_from_class_map(entry)
+ if config := missing_config_params.get(entry.name, {}):
+ entry.config_params = config.get("params", entry.config_params)
+ if not repo_path:
+            repo_path = config.get("repo_path")
+ if not repo_path:
+ raise ValueError(f"Unable to determine repo from {entry}")
+ if entry.config_params and list(entry.config_params) != ["use_cache", "kwargs"]:
+ mir_series, mir_comp, mir_suffix = mir_tag_from_config(entry, repo_path)
# modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
repo_path = check_migrations(repo_path)
tk_pkg = {}
- tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(code_name)
+ tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
+ if isinstance(tokenizer_classes, str):
+ tokenizer_classes = [tokenizer_classes]
+        nfo(type(tokenizer_classes))
# mode = modalities.get("mode")
if tokenizer_classes:
index = 0
@@ -309,7 +205,7 @@ def transformers_index():
mir_comp: {
"repo": repo_path,
"pkg": {
- 0: {"transformers": class_name},
+ 0: {"transformers": entry.model_name},
},
# "mode": mode,
},
diff --git a/mir/inspect/classes.py b/mir/inspect/classes.py
index 30a1681..23b955c 100644
--- a/mir/inspect/classes.py
+++ b/mir/inspect/classes.py
@@ -80,36 +80,6 @@ def extract_inherited_classes(model_class: Union[Callable, str], pkg_name: Optio
return class_names
-def extract_init_params(module: Union[Callable, str], pkg_name: Optional[str] = None) -> Dict[str, List[str]]:
- """Pick apart a Diffusers or Transformers pipeline class and find its constituent parts (formerly root_class)\n
- :param module: Origin pipeline as a class or as a string
- :param library: name of a library to import the class from, only if a string is provided
- :return: Dictionary of sub-classes from the `module`"""
-
- import inspect
-
- if pkg_name and isinstance(module, str):
- module = import_submodules(module, pkg_name)
- signature = inspect.signature(module.__init__)
- class_names = {}
- for folder, param in signature.parameters.items():
- if folder != "self":
- sub_module = str(param.annotation).split("'")
- if len(sub_module) > 1 and sub_module[1] not in [
- "bool",
- "int",
- "float",
- "complex",
- "str",
- "list",
- "tuple",
- "dict",
- "set",
- ]:
- class_names.setdefault(folder, sub_module[1].split("."))
- return class_names
-
-
# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
# from nnll.download.hub_cache import download_hub_file
diff --git a/mir/inspect/metadata.py b/mir/inspect/metadata.py
index 1b0befa..190d61b 100644
--- a/mir/inspect/metadata.py
+++ b/mir/inspect/metadata.py
@@ -1,147 +1,98 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-import pkgutil
-from typing import Dict, Generator, List
+from typing import Callable, Generator
import diffusers
+from mir.config.constants import ClassMapEntry, DocStringEntry, extract_init_params
+from mir.config.conversion import retrieve_diffusers_docstrings
-from mir.inspect.classes import extract_init_params
-from mir.config.conversion import pkg_path_to_docstring, file_name_to_docstring
+# if code_name and "__" not in code_name:
+# tasks = TaskAnalyzer.show_transformers_tasks(code_name=code_name)
+# if tasks and isinstance(tasks, list): # Ensure tasks is a list
+# task_pipe = next(iter(tasks))
+# if isinstance(task_pipe, tuple):
+# task_pipe = task_pipe[0]
+# if task_pipe not in exclude_list:
+# model_class = getattr(__import__("transformers"), task_pipe) # this is done to get the path to the config
+# model_data = extract_init_params(model_class)
+# if model_data and ("inspect" not in model_data["config"]) and ("deprecated" not in list(model_data["config"])):
+# transformer_data.setdefault(model_class, model_data)
+# else:
+# model_data = None
+# # Reset task_pipe if tasks was None or not a list
+# if not tasks or not isinstance(tasks, list):
+# task_pipe = None
-def gather_transformers_metadata() -> Dict[str, List[str]]:
+# if not model_data and code_name not in second_exclude_list: # second attempt
+# if code_name == "donut":
+# code_name = "donut-swin"
+# if not task_pipe and code_name and MODEL_MAPPING_NAMES.get(code_name.replace("_", "-")):
+# model_class = getattr(__import__("transformers"), MODEL_MAPPING_NAMES[code_name.replace("_", "-")], None)
+# elif task_pipe:
+# model_class = getattr(__import__("transformers"), task_pipe)
+# config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("_", "-"))
+# if not config_class:
+# config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("-", "_"))
+# if config_class:
+# config_class_obj = getattr(__import__("transformers"), config_class)
+# model_data = {"config": str(config_class_obj.__module__ + "." + config_class_obj.__name__).split(".")}
+# if model_data and ("inspect" not in model_data) and ("deprecated" not in model_data) and model_class:
+# transformer_data.setdefault(model_class, model_data)
+# return transformer_data
+
+
+def map_transformers_classes() -> list[ClassMapEntry]:
"""Eat the 🤗Transformers classes as a treat, leaving any tasty subclass class morsels neatly arranged as a dictionary.\n
Nom.
:return: Tasty mapping of subclasses to their class references"""
+ from transformers.models.auto.configuration_auto import CONFIG_MAPPING
+ from transformers.models.auto.modeling_auto import MODEL_MAPPING # config: model map
- transformer_data = {}
- exclude_list = [
- "DecisionTransformerModel",
- "DistilBertModel",
- "GraphormerModel",
- "GPTBigCodeModel",
- "TimmBackbone",
- "PerceptionEncoder",
- "SeamlessM4Tv2Model",
- "SeamlessM4TModel",
- "VisionTextDualEncoderModel",
- ]
- second_exclude_list = [
- "vision-text-dual-encoder",
- "vision_text_dual_encoder",
- "gpt_bigcode",
- "data2vec",
- "vision-text",
- "mllama"
- "bert_japanese",
- "cpm",
- "dab_detr",
- "decision_transformer",
- "timm_backbone",
- ] # there just isnt a repo in this one
- import os
-
- import transformers
- from transformers.models.auto.modeling_auto import CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES
-
- from mir.inspect.tasks import TaskAnalyzer
-
- model_data = None
- task_pipe = None
- model_names = list(dict(MODEL_MAPPING_NAMES).keys())
- folder_data = {*model_names}
- models_folder = os.path.join(os.path.dirname(transformers.__file__), "models")
- folder_data = folder_data.union(os.listdir(models_folder))
- for code_name in folder_data:
- model_class = None
- if code_name and "__" not in code_name:
- tasks = TaskAnalyzer.show_transformers_tasks(code_name=code_name)
- if tasks and isinstance(tasks, list): # Ensure tasks is a list
- task_pipe = next(iter(tasks))
- if isinstance(task_pipe, tuple):
- task_pipe = task_pipe[0]
- if task_pipe not in exclude_list:
- model_class = getattr(__import__("transformers"), task_pipe) # this is done to get the path to the config
- model_data = extract_init_params(model_class)
- if model_data and ("inspect" not in model_data["config"]) and ("deprecated" not in list(model_data["config"])):
- transformer_data.setdefault(model_class, model_data)
- else:
- model_data = None
- # Reset task_pipe if tasks was None or not a list
- if not tasks or not isinstance(tasks, list):
- task_pipe = None
-
- if not model_data and code_name not in second_exclude_list: # second attempt
- if code_name == "donut":
- code_name = "donut-swin"
- if not task_pipe and code_name and MODEL_MAPPING_NAMES.get(code_name.replace("_", "-")):
- model_class = getattr(__import__("transformers"), MODEL_MAPPING_NAMES[code_name.replace("_", "-")], None)
- elif task_pipe:
- model_class = getattr(__import__("transformers"), task_pipe)
- config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("_", "-"))
- if not config_class:
- config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("-", "_"))
- if config_class:
- config_class_obj = getattr(__import__("transformers"), config_class)
- model_data = {"config": str(config_class_obj.__module__ + "." + config_class_obj.__name__).split(".")}
- if model_data and ("inspect" not in model_data) and ("deprecated" not in model_data) and model_class:
- transformer_data.setdefault(model_class, model_data)
- return transformer_data
+ model_data = []
+ for config_name, config_obj in CONFIG_MAPPING.items():
+ model_params = None
+ if model_obj := MODEL_MAPPING.get(config_obj, None):
+ if isinstance(model_obj, Callable):
+ model_obj = (model_obj,)
+ assert isinstance(model_obj, tuple)
+ for model_class in model_obj:
+ if model_params and ("inspect" not in model_params["config"]) and ("deprecated" not in list(model_params["config"])):
+ pass
+ else:
+ model_params = None
+ model_name = model_class.__name__
+ model_data.append(
+ ClassMapEntry(
+ name=config_name,
+ model_name=model_name.split(".")[-1],
+ model=model_class, # type: ignore
+ config=config_obj,
+ ),
+ )
+ return model_data
-def gather_diffusers_metadata() -> Generator:
- """Draw down docstrings from 🤗Diffusers library, minimizing internet requests\n
+def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
+ """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
:return: Docstrings for common diffusers models"""
+ import os
- non_standard = {
- "cogvideo": "cogvideox",
- "cogview3": "cogview3plus",
- "deepfloyd_if": "if",
- "cosmos": "cosmos2_text2image", # search folder for all files containing 'EXAMPLE DOC STRING'
- "visualcloze": "visualcloze_generation",
- }
+ from diffusers.pipelines import _import_structure
- exclusion_list = [ # no doc string or other issues. all can be be gathered by other means
- "autopipeline", #
- "dance_diffusion", # no doc_string
- "ddim",
- "ddpm",
- "deprecated",
- "diffusionpipeline", #
- "dit",
- "latent_consistency_models", # "latent_consistency_text2img",
- "latent_diffusion", # no doc_string
- "ledits_pp", # "leditspp_stable_diffusion",
- "marigold", # specific processing routines
- "omnigen", # tries to import torchvision
- "pag", # not model based
- "paint_by_example", # no docstring
- "pia", # lora adapter
- "semantic_stable_diffusion", # no_docstring
- "stable_diffusion_attend_and_excite",
- "stable_diffusion_diffedit",
- "stable_diffusion_k_diffusion", # tries to import k_diffusion
- "stable_diffusion_panorama",
- "stable_diffusion_safe", # impossible
- "stable_diffusion_sag", #
- "t2i_adapter",
- "text_to_video_synthesis",
- "unclip",
- "unidiffuser",
- "controlnet_hunyuandit",
- "hunyuandit",
- # these are uncommon afaik
- ]
+ from mir.config.json_io import read_json_file
- for _, pkg_name, is_pkg in pkgutil.iter_modules(diffusers.pipelines.__path__):
- if is_pkg and pkg_name not in exclusion_list:
- file_specific = non_standard.get(pkg_name, pkg_name)
- folder_name = getattr(diffusers.pipelines, str(pkg_name))
- if folder_name:
- if hasattr(folder_name, "_import_structure"):
- yield from pkg_path_to_docstring(pkg_name, folder_name)
- else:
- yield from file_name_to_docstring(pkg_name, file_specific)
+ project_root = os.path.dirname(os.path.dirname(__file__))
+ pattern_file = os.path.join(project_root, "spec", "docstring_patterns.json")
+ docstring_patterns = read_json_file(pattern_file)
+ exclusion_list = docstring_patterns["exclusion_list"]
+ uncommon_naming = docstring_patterns["uncommon_naming"]
+ for pipe_name in _import_structure.keys():
+ if pipe_name not in exclusion_list:
+ file_specific = uncommon_naming.get(pipe_name, pipe_name)
+ if import_name := getattr(diffusers.pipelines, str(pipe_name)):
+ file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
+ yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
else:
continue
diff --git a/mir/inspect/pipes.py b/mir/inspect/pipes.py
index 8bcc738..8ef1d06 100644
--- a/mir/inspect/pipes.py
+++ b/mir/inspect/pipes.py
@@ -4,23 +4,22 @@
from typing import List, Optional
-def get_transformer_config_classes(parameter_filter: Optional[str] = None) -> List[str]:
+def show_shared_hyperparameters(parameter_filter: Optional[str] = None) -> List[str]:
"""Show all config classes in the Transformer package with the specified init annotation\n
:param from_match: Narrow the classes to only those with an exact key inside
:return: A list of all Classes"""
- from mir.inspect.metadata import gather_transformers_metadata
- from mir.inspect.classes import extract_init_params
+ from mir.inspect.metadata import map_transformers_classes
+ from mir.config.constants import extract_init_params
- transformers_data = gather_transformers_metadata()
+ transformers_data = map_transformers_classes()
config_data = []
- for model_path in list(transformers_data.values()):
- config_class = model_path["config"][-1]
+ for entry in transformers_data:
if parameter_filter:
- segments = extract_init_params(config_class, pkg_name="transformers")
+ segments = extract_init_params(module=entry.config, package_name="transformers")
if parameter_filter in list(segments):
- config_data.append(config_class)
+ config_data.append(entry.config)
else:
- config_data.append(config_class)
+ config_data.append(entry.config)
return config_data
diff --git a/mir/inspect/tasks.py b/mir/inspect/tasks.py
index 1e10cc4..3356ef5 100644
--- a/mir/inspect/tasks.py
+++ b/mir/inspect/tasks.py
@@ -69,6 +69,7 @@ def show_transformers_tasks(class_name: str | None = None, code_name: str | None
elif code_name:
from mir.config.constants import mapped_cls
from httpx import HTTPStatusError
+
try:
model_class = mapped_cls(code_name)
if model_class is not None:
@@ -181,13 +182,13 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir
:param mir_db: MIRDatabase instance for querying tags/IDs
:return: Tuple containing MIR tag and class name"""
- from mir.tag import make_scheduler_tag
+ from mir.tag import tag_scheduler
mir_tag = None
class_name = pipe_class.__name__
if pipe_role in ["scheduler", "image_noising_scheduler", "prior_scheduler"]:
sub_field = pipe_class.__module__.split(".")[0]
- scheduler_series, scheduler_comp = make_scheduler_tag(class_name)
+ scheduler_series, scheduler_comp = tag_scheduler(class_name)
mir_tag = [f"ops.scheduler.{scheduler_series}", scheduler_comp]
if not mir_db.database.get(mir_tag[0], {}).get(mir_tag[1]):
mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="ops.scheduler")
@@ -266,3 +267,65 @@ def trace_classes(pipe_class: str, pkg_name: str) -> Dict[str, List[str]]:
related_pipes = set(related_pipes)
related_pipes.update(tuple(x) for x in extract_inherited(model_class=pipe_class, pkg_name=pkg_name))
return related_pipes
+
+
+def main(mir_db: MIRDatabase | None = None):
+ """Parse arguments to feed to dict header reader"""
+ import argparse
+ import asyncio
+ from mir.automata import assimilate
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Scrape the task classes from currently installed libraries and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-tasks",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ auto_pkg = TaskAnalyzer()
+ task_tuple = asyncio.run(auto_pkg.detect_tasks(mir_db))
+
+ assimilate(mir_db, [task for task in task_tuple])
+
+ mir_db.write_to_disk()
+ return mir_db
+
+
+def run_task():
+ main()
+
+
+def pipe(mir_db: MIRDatabase | None = None):
+ import argparse
+ import asyncio
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Infer pipe components from Diffusers library and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-pipe",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ from mir.automata import assimilate
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ auto_pkg = TaskAnalyzer()
+ pipe_tuple = asyncio.run(auto_pkg.detect_pipes(mir_db))
+ assimilate(mir_db, [pipe for pipe in pipe_tuple])
+ mir_db.write_to_disk()
+ return mir_db
+
+
+if __name__ == "__main__":
+ pipe()
diff --git a/mir/maid.py b/mir/maid.py
index e26a7fd..a25a3eb 100644
--- a/mir/maid.py
+++ b/mir/maid.py
@@ -20,7 +20,7 @@ def __init__(self, database: dict | None = None) -> None:
if not database:
try:
- self.database = read_json_file(MIR_PATH_NAMED)
+ self.database: dict[str, Any] = read_json_file(MIR_PATH_NAMED)
except JSONDecodeError as error_log:
dbuq(error_log)
self.database = {}
@@ -32,7 +32,7 @@ def add(self, resource: dict[str, Any]) -> None:
parent_key = next(iter(resource))
if self.database is not None:
if self.database.get(parent_key, 0):
- self.database[parent_key] = {**self.database[parent_key], **resource[parent_key]}
+ self.database[parent_key] = self.database[parent_key] | resource[parent_key]
else:
self.database[parent_key] = resource[parent_key]
@@ -65,7 +65,7 @@ def read_from_disk(self, data: Optional[dict] = None) -> dict[str, Any]:
self.database = read_json_file(MIR_PATH_NAMED)
return self.database
- def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibility: str) -> List[str]:
+ def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibility: str) -> list[str | bool]:
"""Process a single value for matching against the target\n
:param value: An unknown string value
:param target: The search target
@@ -79,7 +79,7 @@ def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibilit
results = []
if isinstance(maybe_match, str):
- maybe_match = [maybe_match]
+ maybe_match: list[str] = [maybe_match]
elif isinstance(maybe_match, dict):
if isinstance(next(iter(maybe_match)), int):
maybe_match = list(maybe_match.values())
@@ -97,7 +97,7 @@ def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibilit
return results
@staticmethod
- def grade_maybes(matches: List[List[str]], target: str) -> list[str, str]:
+ def grade_maybes(matches: List[List[str]], target: str) -> list[str] | None:
"""Evaluate and select the best match from a list of potential matches\n
:param matches: Possible matches to compare
:param target: Desired entry to match
@@ -151,7 +151,6 @@ def find_tag(self, field: str, target: str, sub_field: Optional[str] = None, dom
parameters = r"-gguf|-exl2|-exl3|-onnx|-awq|-mlx|-ov" #
target = target.lower().strip("-")
target = re.sub(parameters, "", target)
- self.matches = None
self.matches = []
for series, comp in self.database.items():
@@ -229,6 +228,9 @@ def main(mir_db: Callable | None = None, remake: bool = True) -> None:
add_mir_diffusion(mir_db)
add_mir_llm(mir_db)
add_mir_vae(mir_db)
+ mir_db.write_to_disk()
+ mir_db = MIRDatabase()
+ mir_db = MIRDatabase()
mir_update(mir_db)
mir_db.write_to_disk()
@@ -243,8 +245,6 @@ def main(mir_db: Callable | None = None, remake: bool = True) -> None:
if "pytest" not in sys_modules: #
import argparse
- from mir.config.console import nfo
-
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
@@ -283,6 +283,7 @@ def main(mir_db: Callable | None = None, remake: bool = True) -> None:
pipes = not args.pipes_off
main(remake=remake)
+ update_mir()
from mir.inspect.tasks import pipe, run_task
mir_db = run_task()
diff --git a/mir/mir.json b/mir/mir.json
index 59ae13b..78868b2 100644
--- a/mir/mir.json
+++ b/mir/mir.json
@@ -1,3 +1,5027 @@
{
- "expected": "data"
+ "info.controlnet.sd-controlnet-canny": {
+ "*": {
+ "repo": "lllyasviel/sd-controlnet-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.blipdiffusion-controlnet": {
+ "*": {
+ "repo": "Salesforce/blipdiffusion-controlnet",
+ "pkg": {
+ "0": {
+ "diffusers": "BlipDiffusionControlNetPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.control-v11p-sd15-inpaint": {
+ "*": {
+ "repo": "lllyasviel/control_v11p_sd15_inpaint",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-canny-sdxl-1": {
+ "*": {
+ "repo": "diffusers/controlnet-canny-sdxl-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-depth-sdxl-1": {
+ "*": {
+ "repo": "diffusers/controlnet-depth-sdxl-1.0-small",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.controlnet-union-sdxl-1": {
+ "*": {
+ "repo": "xinsir/controlnet-union-sdxl-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetUnionModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.sd3-controlnet-canny": {
+ "*": {
+ "repo": "InstantX/SD3-Controlnet-Canny",
+ "pkg": {
+ "0": {
+ "diffusers": "SD3ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.sd3-controlnet-inpainting": {
+ "*": {
+ "repo": "alimama-creative/SD3-Controlnet-Inpainting",
+ "pkg": {
+ "0": {
+ "diffusers": "SD3ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.testing-conrolnetxs-sd2-canny": {
+ "*": {
+ "repo": "UmerHA/Testing-ConrolNetXS-SD2.1-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetXSAdapter"
+ }
+ }
+ }
+ },
+ "info.controlnet.testing-conrolnetxs-sdxl-canny": {
+ "*": {
+ "repo": "UmerHA/Testing-ConrolNetXS-SDXL-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetXSAdapter"
+ }
+ }
+ }
+ },
+ "info.unet.stable-diffusion-v1-5": {
+ "*": {
+ "repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-unclip-2-1-l": {
+ "*": {
+ "repo": "fusing/stable-unclip-2-1-l",
+ "pkg": {
+ "0": {
+ "diffusers": "StableUnCLIPPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-diffusion-2-1-unclip": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-2-1-unclip-small",
+ "pkg": {
+ "0": {
+ "diffusers": "StableUnCLIPImg2ImgPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-diffusion-xl-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-base-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLPipeline"
+ }
+ }
+ },
+ "pony-diffusion": {
+ "file_256": [
+ "67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"
+ ],
+ "layer_256": [
+ "465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"
+ ],
+ "layer_b3": [
+ "bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"
+ ]
+ },
+ "pony-diffusion-turbo": {
+ "file_256": [
+ "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587",
+ "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2"
+ ],
+ "layer_256": [
+ "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
+ "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52"
+ ],
+ "layer_b3": [
+ "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
+ "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc"
+ ]
+ },
+ "animagine-xl-4": {
+ "repo": "cagliostrolab/animagine-xl-4.0",
+ "file_256": [
+ "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f",
+ "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac",
+ "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766",
+ "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0"
+ ],
+ "layer_256": [
+ "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
+ "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
+ "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
+ "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65"
+ ],
+ "layer_b3": [
+ "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
+ "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
+ "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
+ "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447"
+ ]
+ },
+ "illustrious-xl-v2": {
+ "repo": "OnomaAIResearch/Illustrious-XL-v2.0",
+ "file_256": [
+ "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331",
+ "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c",
+ "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9",
+ "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce",
+ "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd"
+ ],
+ "layer_256": [
+ "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
+ "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
+ "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
+ "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
+ "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10"
+ ],
+ "layer_b3": [
+ "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
+ "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
+ "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
+ "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
+ "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c"
+ ]
+ },
+ "playground-v2---aesthetic": {
+ "repo": "playgroundai/playground-v2.5-1024px-aesthetic",
+ "pkg": {
+ "0": {
+ "diffusers": "DiffusionPipeline",
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 50,
+ "guidance_scale": 3
+ }
+ }
+ },
+ "file_256": [
+ "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8",
+ "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992",
+ "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a",
+ "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864",
+ "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4",
+ "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a"
+ ],
+ "layer_256": [
+ "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
+ "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843",
+ "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
+ "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e",
+ "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c"
+ ],
+ "layer_b3": [
+ "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b",
+ "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289",
+ "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc",
+ "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
+ "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
+ "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
+ ]
+ },
+ "segmind-vega": {
+ "repo": "segmind/Segmind-Vega",
+ "file_256": [
+ "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce",
+ "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1",
+ "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2"
+ ],
+ "layer_256": [
+ "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
+ "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
+ "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741"
+ ],
+ "layer_b3": [
+ "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
+ "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
+ "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1"
+ ]
+ },
+ "ssd": {
+ "repo": "segmind/SSD-1B",
+ "file_256": [
+ "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd",
+ "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d",
+ "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
+ "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca",
+ "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a"
+ ],
+ "layer_256": [
+ "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
+ "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
+ "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
+ "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
+ "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
+ ],
+ "layer_b3": [
+ "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
+ "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4",
+ "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
+ "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
+ "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
+ ]
+ }
+ },
+ "info.unet.stable-diffusion-xl-refiner-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-refiner-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLImg2ImgPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.sdxl-pix2pix-768": {
+ "*": {
+ "repo": "diffusers/sdxl-instructpix2pix-768",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLInstructPix2PixPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.allegro": {
+ "*": {
+ "repo": "rhymes-ai/Allegro",
+ "pkg": {
+ "0": {
+ "diffusers": "AllegroPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.amused-512": {
+ "*": {
+ "repo": "amused/amused-512",
+ "pkg": {
+ "0": {
+ "diffusers": "AmusedInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.lora.animatediff-motion-adapter-v1-5-2": {
+ "*": {
+ "repo": "guoyww/animatediff-motion-adapter-v1-5-2",
+ "pkg": {
+ "0": {
+ "diffusers": "AnimateDiffVideoToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.lora.animatediff-motion-adapter-sdxl": {
+ "*": {
+ "repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
+ "pkg": {
+ "0": {
+ "diffusers": "AnimateDiffSDXLPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.animatediff-sparsectrl-scribble": {
+ "*": {
+ "repo": "guoyww/animatediff-sparsectrl-scribble",
+ "pkg": {
+ "0": {
+ "diffusers": "SparseControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.animatelcm": {
+ "*": {
+ "repo": "wangfuyun/AnimateLCM",
+ "pkg": {
+ "0": {
+ "diffusers": "ControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.bria-3": {
+ "*": {
+ "repo": "briaai/BRIA-3.2",
+ "pkg": {
+ "0": {
+ "diffusers": "BriaPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.flux2-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.2-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "Flux2Pipeline"
+ }
+ }
+ }
+ },
+ "info.dit.flux1-schnell": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-schnell",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxInpaintPipeline"
+ }
+ }
+ },
+ "shuttle-3-aesthetic": {
+ "repo": "shuttleai/shuttle-3.1-aesthetic",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
+ "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
+ ],
+ "layer_256": [
+ "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
+ "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a"
+ ],
+ "layer_b3": [
+ "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
+ "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c"
+ ]
+ },
+ "shuttle-3-diffusion": {
+ "repo": "shuttleai/shuttle-3-diffusion",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd",
+ "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755",
+ "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be",
+ "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8"
+ ],
+ "layer_256": [
+ "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
+ "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
+ "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c"
+ ],
+ "layer_b3": [
+ "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
+ "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
+ "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd"
+ ]
+ },
+ "shuttle-jaguar": {
+ "repo": "shuttleai/shuttle-jaguar",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
+ "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f"
+ ],
+ "layer_b3": [
+ "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
+ "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672"
+ ]
+ }
+ },
+ "info.controlnet.flux1-canny-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Canny-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.flux1-dev-controlnet-canny": {
+ "*": {
+ "repo": "InstantX/FLUX.1-dev-controlnet-canny",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.flux1-dev-controlnet-canny-alpha": {
+ "*": {
+ "repo": "InstantX/FLUX.1-dev-Controlnet-Canny-alpha",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.flux1-fill-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Fill-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxFillPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.flux1-kontext-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Kontext-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxKontextInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.flux1-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxPipeline"
+ }
+ }
+ },
+ "mystic": {
+ "repo": "enhanceaiteam/Mystic",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 16,
+ "guidance_scale": 7.5,
+ "width": 768,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
+ "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
+ ],
+ "layer_256": [
+ "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
+ ],
+ "layer_b3": [
+ "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
+ ]
+ },
+ "flux1-lite": {
+ "repo": "freepik/flux.1-lite-8b",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ },
+ "file_256": [
+ "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
+ "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
+ ],
+ "layer_256": [
+ "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
+ ],
+ "layer_b3": [
+ "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
+ ]
+ },
+ "f-lite": {
+ "repo": "freepik/f-lite",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "f-lite-texture": {
+ "repo": "freepik/f-lite-texture",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "flux": {
+ "repo": "TencentARC/flux-mini",
+ "file_256": [
+ "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
+ ],
+ "layer_256": [
+ "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
+ ],
+ "layer_b3": [
+ "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
+ ]
+ },
+ "flex2": {
+ "repo": "ostris/Flex.2-preview",
+ "file_256": [
+ "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
+ "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
+ ],
+ "layer_256": [
+ "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
+ ],
+ "layer_b3": [
+ "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
+ ]
+ },
+ "flex1-alpha": {
+ "repo": "ostris/Flex.1-alpha",
+ "file_256": [
+ "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
+ "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
+ ],
+ "layer_256": [
+ "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
+ ],
+ "layer_b3": [
+ "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
+ ]
+ }
+ },
+ "info.dit.prx-512-t2i-sft": {
+ "*": {
+ "repo": "Photoroom/prx-512-t2i-sft",
+ "pkg": {
+ "0": {
+ "diffusers": "PRXPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.audioldm-s-v2": {
+ "*": {
+ "repo": "cvssp/audioldm-s-full-v2",
+ "pkg": {
+ "0": {
+ "diffusers": "AudioLDMPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.audioldm2": {
+ "*": {
+ "repo": "cvssp/audioldm2",
+ "pkg": {
+ "0": {
+ "diffusers": "AudioLDM2Pipeline"
+ }
+ }
+ }
+ },
+ "info.unet.blipdiffusion": {
+ "*": {
+ "repo": "Salesforce/blipdiffusion",
+ "pkg": {
+ "0": {
+ "diffusers": "BlipDiffusionPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.chroma": {
+ "*": {
+ "repo": "lodestones/Chroma",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaPipeline"
+ }
+ }
+ },
+ "chroma1-hd": {
+ "repo": "lodestones/Chroma1-HD",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 40
+ }
+ }
+ },
+ "file_256": [
+ "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546",
+ "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7",
+ "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc"
+ ]
+ },
+ "chroma1-flash": {
+ "repo": "lodestones/Chroma1-Flash",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaPipeline",
+ "generation": {
+ "num_inference_steps": 8,
+ "guidance_scale": 1.0,
+ "num_images_per_prompt": 1
+ }
+ }
+ },
+ "file_256": [
+ "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
+ "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
+ "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
+ ]
+ }
+ },
+ "info.dit.chroma1-hd": {
+ "*": {
+ "repo": "lodestones/Chroma1-HD",
+ "pkg": {
+ "0": {
+ "diffusers": "ChromaImg2ImgPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cogvideox": {
+ "*": {
+ "repo": "zai-org/CogVideoX-2b",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.cogvideox-fun-v-pose": {
+ "*": {
+ "repo": "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXFunControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cogvideox-i2v": {
+ "*": {
+ "repo": "zai-org/CogVideoX-5b-I2V",
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cogview3": {
+ "*": {
+ "repo": "zai-org/CogView3-Plus-3B",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView3PlusPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cogview4": {
+ "*": {
+ "repo": "zai-org/CogView4-6B",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView4Pipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.cogview4-control": {
+ "*": {
+ "repo": "zai-org/CogView4-6B-Control",
+ "pkg": {
+ "0": {
+ "diffusers": "CogView4ControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.pre-trianed": {
+ "*": {
+ "repo": "model_id, revision=\"diffusers/base/pre-trianed",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2_5_PredictBasePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cosmos-predict2-text2image": {
+ "*": {
+ "repo": "nvidia/Cosmos-Predict2-2B-Text2Image",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2TextToImagePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cosmos-predict2-video2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-Predict2-2B-Video2World",
+ "pkg": {
+ "0": {
+ "diffusers": "Cosmos2VideoToWorldPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cosmos-1-diffusion-text2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
+ "pkg": {
+ "0": {
+ "diffusers": "CosmosTextToWorldPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.cosmos-1-diffusion-video2world": {
+ "*": {
+ "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World",
+ "pkg": {
+ "0": {
+ "diffusers": "CosmosVideoToWorldPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.if-i-xl-v1": {
+ "*": {
+ "repo": "DeepFloyd/IF-I-XL-v1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "IFPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.easyanimatev5-zh": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimatePipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.easyanimatev5-zh-control": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimateControlPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.easyanimatev5-zh-inp": {
+ "diffusers": {
+ "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "EasyAnimateInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hidream-i1": {
+ "*": {
+ "repo": "HiDream-ai/HiDream-I1-Full",
+ "pkg": {
+ "0": {
+ "diffusers": "HiDreamImagePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuandit-v1": {
+ "diffusers": {
+ "repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanDiTPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanvideo": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-i2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-I2V",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideoImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-1-480p-t2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_t2v",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideo15Pipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanvideo-1-480p-i2v": {
+ "*": {
+ "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_i2v",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanVideo15ImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanimage-2": {
+ "diffusers": {
+ "repo": "hunyuanvideo-community/HunyuanImage-2.1-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanImagePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.hunyuanimage-2-refiner": {
+ "diffusers": {
+ "repo": "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "HunyuanImageRefinerPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.kandinsky-2-1": {
+ "prior": {
+ "repo": "kandinsky-community/kandinsky-2-1-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "KandinskyPriorPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.kandinsky-2-2": {
+ "prior": {
+ "repo": "kandinsky-community/kandinsky-2-2-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "KandinskyPriorPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.latte-1": {
+ "*": {
+ "repo": "maxin-cn/Latte-1",
+ "pkg": {
+ "0": {
+ "diffusers": "LattePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.ltx-video": {
+ "*": {
+ "repo": "Lightricks/LTX-Video",
+ "pkg": {
+ "0": {
+ "diffusers": "LTXImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.ltx-video-09": {
+ "*": {
+ "repo": "Lightricks/LTX-Video-0.9.5",
+ "pkg": {
+ "0": {
+ "diffusers": "LTXConditionPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.lumina-next-sft": {
+ "diffusers": {
+ "repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "LuminaPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.lumina-image-2": {
+ "*": {
+ "repo": "Alpha-VLLM/Lumina-Image-2.0",
+ "pkg": {
+ "0": {
+ "diffusers": "Lumina2Pipeline"
+ }
+ }
+ },
+ "illustrious-lumina-v3": {
+ "repo": "OnomaAIResearch/Illustrious-Lumina-v0.03",
+ "file_256": [
+ "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8",
+ "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214"
+ ],
+ "layer_256": [
+ "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
+ "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828"
+ ],
+ "layer_b3": [
+ "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
+ "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c"
+ ]
+ }
+ },
+ "info.dit.longcat-image": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Image",
+ "pkg": {
+ "0": {
+ "diffusers": "LongCatImagePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.longcat-image-edit": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Image-Edit",
+ "pkg": {
+ "0": {
+ "diffusers": "LongCatImageEditPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.mochi-1": {
+ "*": {
+ "repo": "genmo/mochi-1-preview",
+ "pkg": {
+ "0": {
+ "diffusers": "MochiPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.musicldm": {
+ "*": {
+ "repo": "ucsd-reach/musicldm",
+ "pkg": {
+ "0": {
+ "diffusers": "MusicLDMPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.omnigen-v1": {
+ "diffusers": {
+ "repo": "Shitao/OmniGen-v1-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "OmniGenPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.ovis-image": {
+ "*": {
+ "repo": "AIDC-AI/Ovis-Image-7B",
+ "pkg": {
+ "0": {
+ "diffusers": "OvisImagePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.visualclozepipeline-384": {
+ "*": {
+ "repo": "VisualCloze/VisualClozePipeline-384",
+ "pkg": {
+ "0": {
+ "diffusers": "VisualClozeGenerationPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.pixart-xl-2-1024-ms": {
+ "*": {
+ "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
+ "pkg": {
+ "0": {
+ "diffusers": "PixArtAlphaPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.pixart-sigma-xl-2-1024-ms": {
+ "*": {
+ "repo": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
+ "pkg": {
+ "0": {
+ "diffusers": "PixArtSigmaPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.sana-1024px-bf16": {
+ "diffusers": {
+ "repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.sana-1024px-controlnet": {
+ "diffusers": {
+ "repo": "ishan24/Sana_600M_1024px_ControlNetPlus_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaControlNetPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.sana-sprint-1024px": {
+ "diffusers": {
+ "repo": "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaSprintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.sana-video": {
+ "*": {
+ "repo": "Efficient-Large-Model/SANA-Video_2B_480p_diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SanaImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.shap-e": {
+ "*": {
+ "repo": "openai/shap-e",
+ "pkg": {
+ "0": {
+ "diffusers": "ShapEPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.stable-audio-open-1": {
+ "*": {
+ "repo": "stabilityai/stable-audio-open-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableAudioPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-cascade": {
+ "prior": {
+ "repo": "stabilityai/stable-cascade-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "StableCascadePriorPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.flux1-dev": {
+ "decoder": {
+ "repo": "black-forest-labs/FLUX.1-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "WuerstchenDecoderPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.auraflow": {
+ "*": {
+ "repo": "fal/AuraFlow",
+ "pkg": {
+ "0": {
+ "diffusers": "AuraFlowPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.stable-diffusion-3": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-3.5-medium",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusion3Pipeline"
+ }
+ }
+ },
+ "stable-diffusion-3-turbo": {
+ "repo": "tensorart/stable-diffusion-3.5-medium-turbo",
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "num_inference_steps": 8,
+ "guidance_scale": 1.5,
+ "height": 1024,
+ "width": 768
+ }
+ }
+ },
+ "file_256": [
+ "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246",
+ "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d",
+ "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b",
+ "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e"
+ ],
+ "layer_256": [
+ "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
+ "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
+ "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
+ "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
+ ],
+ "layer_b3": [
+ "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48",
+ "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
+ "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
+ "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
+ ]
+ }
+ },
+ "info.unet.gligen-1-4-inpainting-text-box": {
+ "*": {
+ "repo": "masterful/gligen-1-4-inpainting-text-box",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionGLIGENPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.gligen-inpainting-text-image": {
+ "*": {
+ "repo": "anhnct/Gligen_Inpainting_Text_Image",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionGLIGENTextImagePipeline"
+ }
+ }
+ }
+ },
+ "info.unet.stable-video-diffusion-img2vid-xt": {
+ "*": {
+ "repo": "stabilityai/stable-video-diffusion-img2vid-xt",
+ "pkg": {
+ "0": {
+ "diffusers": "StableVideoDiffusionPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.ldm3d-4c": {
+ "*": {
+ "repo": "Intel/ldm3d-4c",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionLDM3DPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.i2vgen-xl": {
+ "*": {
+ "repo": "ali-vilab/i2vgen-xl",
+ "pkg": {
+ "0": {
+ "diffusers": "I2VGenXLPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.wuerstchen": {
+ "prior": {
+ "repo": "warp-ai/wuerstchen-prior",
+ "pkg": {
+ "0": {
+ "diffusers": "WuerstchenPriorPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.wan2-t2v": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.wan-animate": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.2-Animate-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanAnimatePipeline"
+ }
+ }
+ }
+ },
+ "info.dit.wan2-i2v-480p": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.wan21-vace": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanVACEPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.wan21-t2v": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "WanVideoToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.kandinsky-5-t2v-lite-sft-5s": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-T2V-Lite-sft-5s-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5T2VPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.kandinsky-5-i2i-lite-sft": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5I2IPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.kandinsky-5-i2v-sft-5s": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5I2VPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.kandinsky-5-t2i-lite-sft": {
+ "diffusers": {
+ "repo": "kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "Kandinsky5T2IPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.z-image-turbo": {
+ "*": {
+        "repo": "Tongyi-MAI/Z-Image-Turbo",
+ "pkg": {
+ "0": {
+ "diffusers": "ZImageOmniPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.z-image-turbo": {
+ "*": {
+ "repo": "Tongyi-MAI/Z-Image-Turbo",
+ "pkg": {
+ "0": {
+ "diffusers": "ZImageControlNetInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.skyreels-v2-t2v-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2Pipeline"
+ }
+ }
+ }
+ },
+ "info.dit.skyreels-v2-df-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-DF-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.skyreels-v2-i2v-720p": {
+ "diffusers": {
+ "repo": "Skywork/SkyReels-V2-I2V-14B-720P-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "SkyReelsV2ImageToVideoPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.qwen-image": {
+ "*": {
+ "repo": "Qwen/Qwen-Image",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.controlnet.qwen-image-controlnet-union": {
+ "*": {
+ "repo": "InstantX/Qwen-Image-ControlNet-Union",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageControlNetModel"
+ }
+ }
+ }
+ },
+ "info.controlnet.qwen-image-controlnet-inpainting": {
+ "*": {
+ "repo": "InstantX/Qwen-Image-ControlNet-Inpainting",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageControlNetModel"
+ }
+ }
+ }
+ },
+ "info.dit.qwen-image-edit": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Edit",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageEditInpaintPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.qwen-image-edit-2509": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Edit-2509",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageEditPlusPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.qwen-image-layered": {
+ "*": {
+ "repo": "Qwen/Qwen-Image-Layered",
+ "pkg": {
+ "0": {
+ "diffusers": "QwenImageLayeredPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.chronoedit": {
+ "diffusers": {
+ "repo": "nvidia/ChronoEdit-14B-Diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "ChronoEditPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.kolors": {
+ "diffusers": {
+ "repo": "Kwai-Kolors/Kolors-diffusers",
+ "pkg": {
+ "0": {
+ "diffusers": "KolorsPipeline"
+ }
+ }
+ }
+ },
+ "info.encoder.tokenizer": {
+ "funnel": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "nllb-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "deberta-v2-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "xlm-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "gpt2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "megatron-bert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "blenderbot": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "20": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "21": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "22": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "23": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "omdet-turbo-swin-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "mgp-str": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "blip2-opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "efficient-mlm-m0-0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "wav2vec2-conformer-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "wav2vec2-bert-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "mm-grounding-dino-o365v1-goldg-v3det": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "bert-for-seq-generation-l-24-bbc-encoder": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "19": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "20": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "21": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "22": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "grounding-dino": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ },
+ "xlm-roberta-xl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "1": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "2": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "3": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "4": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "5": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "6": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "7": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "8": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "9": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "10": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "11": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "12": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "13": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "14": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "15": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "16": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "17": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ },
+ "18": {
+ "transformers": "transformers.utils.import_utils.transformers"
+ }
+ }
+ }
+ },
+ "info.aet.funnel": {
+ "*": {
+ "repo": "funnel-transformer/small",
+ "pkg": {
+ "0": {
+ "transformers": "FunnelModel"
+ }
+ }
+ }
+ },
+ "info.stst.nllb-moe": {
+ "*": {
+ "repo": "facebook/nllb-moe-54b",
+ "pkg": {
+ "0": {
+ "transformers": "NllbMoeModel"
+ }
+ }
+ }
+ },
+ "info.art.deberta-v2-x": {
+ "*": {
+ "repo": "microsoft/deberta-v2-xlarge",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaV2Model"
+ }
+ }
+ }
+ },
+ "info.art.xlm-roberta": {
+ "*": {
+ "repo": "FacebookAI/xlm-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "XLMRobertaModel"
+ }
+ }
+ }
+ },
+ "info.art.gpt2": {
+ "*": {
+ "repo": "openai-community/gpt2",
+ "pkg": {
+ "0": {
+ "transformers": "GPT2Model"
+ }
+ }
+ }
+ },
+ "info.art.megatron-bert-uncased": {
+ "*": {
+ "repo": "nvidia/megatron-bert-uncased-345m",
+ "pkg": {
+ "0": {
+ "transformers": "MegatronBertModel"
+ }
+ }
+ }
+ },
+ "info.stst.blenderbot": {
+ "*": {
+ "repo": "facebook/blenderbot_small-90M",
+ "pkg": {
+ "0": {
+ "transformers": "BlenderbotSmallModel"
+ }
+ }
+ }
+ },
+ "info.detr.omdet-turbo-swin-hf": {
+ "*": {
+ "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "pkg": {
+ "0": {
+ "transformers": "OmDetTurboForObjectDetection"
+ }
+ }
+ }
+ },
+ "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "*": {
+ "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "pkg": {
+ "0": {
+ "transformers": "ASTModel"
+ }
+ }
+ }
+ },
+ "info.vit.mgp-str": {
+ "*": {
+ "repo": "alibaba-damo/mgp-str-base",
+ "pkg": {
+ "0": {
+ "transformers": "MgpstrForSceneTextRecognition"
+ }
+ }
+ }
+ },
+ "info.vit.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2Model"
+ }
+ }
+ }
+ },
+ "info.art.efficient-mlm-m0-0": {
+ "*": {
+ "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaPreLayerNormModel"
+ }
+ }
+ }
+ },
+ "info.aet.wav2vec2-conformer-rel-pos": {
+ "*": {
+ "repo": "facebook/wav2vec2-conformer-rel-pos-large",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2ConformerModel"
+ }
+ }
+ }
+ },
+ "info.aet.unispeech-sat-100h-libri-ft": {
+ "*": {
+ "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechSatModel"
+ }
+ }
+ }
+ },
+ "info.detr.table-transformer-detection": {
+ "*": {
+ "repo": "microsoft/table-transformer-detection",
+ "pkg": {
+ "0": {
+ "transformers": "TableTransformerModel"
+ }
+ }
+ }
+ },
+ "info.detr.dab-detr": {
+ "*": {
+ "repo": "IDEA-Research/dab-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DabDetrModel"
+ }
+ }
+ }
+ },
+ "info.aet.wav2vec2-bert-rel-pos": {
+ "*": {
+ "repo": "facebook/w2v-bert-2.0",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2BertModel"
+ }
+ }
+ }
+ },
+ "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "*": {
+ "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "pkg": {
+ "0": {
+ "transformers": "MMGroundingDinoModel"
+ }
+ }
+ }
+ },
+ "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "*": {
+ "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "pkg": {
+ "0": {
+ "transformers": "BertGenerationEncoder"
+ }
+ }
+ }
+ },
+ "info.detr.grounding-dino": {
+ "*": {
+ "repo": "IDEA-Research/grounding-dino-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "GroundingDinoModel"
+ }
+ }
+ }
+ },
+ "info.art.xlm-roberta-xl": {
+ "*": {
+ "repo": "facebook/xlm-roberta-xl",
+ "pkg": {
+ "0": {
+ "transformers": "XLMRobertaXLModel"
+ }
+ }
+ }
+ },
+ "info.aet.sew-d": {
+ "*": {
+ "repo": "asapp/sew-d-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWDModel"
+ }
+ }
+ }
+ },
+ "ops.precision.uint": {
+ "U8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint8": {
+ "variant": "uint8"
+ }
+ }
+ }
+ }
+ },
+ "U16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint16": {
+ "variant": "uint16"
+ }
+ }
+ }
+ }
+ },
+ "U32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint32": {
+ "variant": "uint32"
+ }
+ }
+ }
+ }
+ },
+ "U64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint64": {
+ "variant": "uint64"
+ }
+ }
+ }
+ }
+ },
+ "U1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint1": {
+ "variant": "uint1"
+ }
+ }
+ }
+ }
+ },
+ "U2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint2": {
+ "variant": "uint2"
+ }
+ }
+ }
+ }
+ },
+ "U3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint3": {
+ "variant": "uint3"
+ }
+ }
+ }
+ }
+ },
+ "U4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint4": {
+ "variant": "uint4"
+ }
+ }
+ }
+ }
+ },
+ "U5": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint5": {
+ "variant": "uint5"
+ }
+ }
+ }
+ }
+ },
+ "U6": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint6": {
+ "variant": "uint6"
+ }
+ }
+ }
+ }
+ },
+ "U7": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "uint7": {
+ "variant": "uint7"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.int": {
+ "I8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int8": {
+ "variant": "int8"
+ }
+ }
+ }
+ }
+ },
+ "I16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int16": {
+ "variant": "int16"
+ }
+ }
+ }
+ }
+ },
+ "I32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int32": {
+ "variant": "int32"
+ }
+ }
+ }
+ }
+ },
+ "I64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int64": {
+ "variant": "int64"
+ }
+ }
+ }
+ }
+ },
+ "Q8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "qint8": {
+ "variant": "qint8"
+ }
+ }
+ }
+ }
+ },
+ "Q32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "qint32": {
+ "variant": "qint32"
+ }
+ }
+ }
+ }
+ },
+ "I1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int1": {
+ "variant": "int1"
+ }
+ }
+ }
+ }
+ },
+ "I2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int2": {
+ "variant": "int2"
+ }
+ }
+ }
+ }
+ },
+ "I3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int3": {
+ "variant": "int3"
+ }
+ }
+ }
+ }
+ },
+ "I4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int4": {
+ "variant": "int4"
+ }
+ }
+ }
+ }
+ },
+ "I5": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int5": {
+ "variant": "int5"
+ }
+ }
+ }
+ }
+ },
+ "I6": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int6": {
+ "variant": "int6"
+ }
+ }
+ }
+ }
+ },
+ "I7": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "int7": {
+ "variant": "int7"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.float": {
+ "F16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float16": {
+ "variant": "fp16"
+ }
+ }
+ }
+ }
+ },
+ "F32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float32": {
+ "variant": "fp32"
+ }
+ }
+ }
+ }
+ },
+ "F64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float64": {
+ "variant": "fp64"
+ }
+ }
+ }
+ }
+ },
+ "F8_E5M2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e5m2": {
+ "variant": "fp8_e5m2"
+ }
+ }
+ }
+ }
+ },
+ "F8_E4M3": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e4m3fn": {
+ "variant": "fp8_e4m3fn"
+ }
+ }
+ }
+ }
+ },
+ "F8_E5M2FNUZ": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e5m2fnuz": {
+ "variant": "fp8_e5m2fnuz"
+ }
+ }
+ }
+ }
+ },
+ "F8_E4M3FNUZ": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e4m3fnuz": {
+ "variant": "fp8_e4m3fnuz"
+ }
+ }
+ }
+ }
+ },
+ "F8_E8M0FNU": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float8_e8m0fnu": {
+ "variant": "fp8_e8m0fnu"
+ }
+ }
+ }
+ }
+ },
+ "F8_E2M1": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "float4_e2m1fn_x2": {
+ "variant": "fp4_e2m1fn_x2"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.complex": {
+ "C32": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex32": {
+ "variant": "complex32"
+ }
+ }
+ }
+ }
+ },
+ "C64": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex64": {
+ "variant": "complex64"
+ }
+ }
+ }
+ }
+ },
+ "C128": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "complex128": {
+ "variant": "complex128"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bool": {
+ "Bbool": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bool": {
+ "variant": "bool"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.quint": {
+ "Q8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint8": {
+ "variant": "quint8"
+ }
+ }
+ }
+ }
+ },
+ "Q4x2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint4x2": {
+ "variant": "quint4x2"
+ }
+ }
+ }
+ }
+ },
+ "Q2x4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "quint2x4": {
+ "variant": "quint2x4"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bfloat": {
+ "B16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bfloat16": {
+ "variant": "bf16"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.precision.bits": {
+ "B1x8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits1x8": {
+ "variant": "bits1x8"
+ }
+ }
+ }
+ }
+ },
+ "B2x4": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits2x4": {
+ "variant": "bits2x4"
+ }
+ }
+ }
+ }
+ },
+ "B4x2": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits4x2": {
+ "variant": "bits4x2"
+ }
+ }
+ }
+ }
+ },
+ "B8": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits8": {
+ "variant": "bits8"
+ }
+ }
+ }
+ }
+ },
+ "B16": {
+ "pkg": {
+ "0": {
+ "torch": {
+ "bits16": {
+ "variant": "bits16"
+ }
+ }
+ }
+ }
+ }
+ },
+ "ops.scheduler.amused": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "AmusedScheduler",
+ "module_path": "diffusers.schedulers.scheduling_amused"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cmstochasticiterative": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CMStochasticIterativeScheduler",
+ "module_path": "diffusers.schedulers.scheduling_consistency_models"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cogvideoxddim": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXDDIMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_cogvideox"
+ }
+ }
+ }
+ },
+ "ops.scheduler.cogvideoxdpm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "CogVideoXDPMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpm_cogvideox"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddiminverse": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMInverseScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_inverse"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddimparallel": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMParallelScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim_parallel"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddim": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDIMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddim"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpmparallel": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMParallelScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm_parallel"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ddpmwuerstchen": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "DDPMWuerstchenScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ddpm_wuerstchen"
+ }
+ }
+ }
+ },
+ "ops.scheduler.deis": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DEISMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_deis_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpminverse": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverMultistepInverseScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep_inverse"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpm": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.dpmsinglestep": {
+ "solver": {
+ "pkg": {
+ "0": {
+ "diffusers": "DPMSolverSinglestepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_dpmsolver_singlestep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.edmdpm": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "EDMDPMSolverMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_edm_dpmsolver_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.edmeuler": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "EDMEulerScheduler",
+ "module_path": "diffusers.schedulers.scheduling_edm_euler"
+ }
+ }
+ }
+ },
+ "ops.scheduler.eulerancestral": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "EulerAncestralDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_euler_ancestral_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.euler": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "EulerDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_euler_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatcheuler": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchEulerDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_euler_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatchheun": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchHeunDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_heun_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.flowmatchlcm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "FlowMatchLCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_flow_match_lcm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.heun": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "HeunDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_heun_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.ipndm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "IPNDMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_ipndm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.karrasve": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "KarrasVeScheduler",
+ "module_path": "diffusers.schedulers.deprecated.scheduling_karras_ve"
+ }
+ }
+ }
+ },
+ "ops.scheduler.kdpm2ancestral": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "KDPM2AncestralDiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.kdpm2": {
+ "discrete": {
+ "pkg": {
+ "0": {
+ "diffusers": "KDPM2DiscreteScheduler",
+ "module_path": "diffusers.schedulers.scheduling_k_dpm_2_discrete"
+ }
+ }
+ }
+ },
+ "ops.scheduler.lcm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "LCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_lcm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.pndm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "PNDMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_pndm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.repaint": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "RePaintScheduler",
+ "module_path": "diffusers.schedulers.scheduling_repaint"
+ }
+ }
+ }
+ },
+ "ops.scheduler.sa": {
+ "solver": {
+ "pkg": {
+ "0": {
+ "diffusers": "SASolverScheduler",
+ "module_path": "diffusers.schedulers.scheduling_sasolver"
+ }
+ }
+ }
+ },
+ "ops.scheduler.scm": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "SCMScheduler",
+ "module_path": "diffusers.schedulers.scheduling_scm"
+ }
+ }
+ }
+ },
+ "ops.scheduler.scoresdeve": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "ScoreSdeVeScheduler",
+ "module_path": "diffusers.schedulers.scheduling_sde_ve"
+ }
+ }
+ }
+ },
+ "ops.scheduler.tcd": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "TCDScheduler",
+ "module_path": "diffusers.schedulers.scheduling_tcd"
+ }
+ }
+ }
+ },
+ "ops.scheduler.unclip": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "UnCLIPScheduler",
+ "module_path": "diffusers.schedulers.scheduling_unclip"
+ }
+ }
+ }
+ },
+ "ops.scheduler.unipc": {
+ "multistep": {
+ "pkg": {
+ "0": {
+ "diffusers": "UniPCMultistepScheduler",
+ "module_path": "diffusers.schedulers.scheduling_unipc_multistep"
+ }
+ }
+ }
+ },
+ "ops.scheduler.vqdiffusion": {
+ "scheduler": {
+ "pkg": {
+ "0": {
+ "diffusers": "VQDiffusionScheduler",
+ "module_path": "diffusers.schedulers.scheduling_vq_diffusion"
+ }
+ }
+ }
+ },
+ "ops.scheduler.karrasdiffusion": {
+ "schedulers": {
+ "pkg": {
+ "0": {
+ "diffusers": "KarrasDiffusionSchedulers",
+ "module_path": "diffusers.schedulers.scheduling_utils"
+ }
+ }
+ }
+ },
+ "info.lora.dmd": {
+ "stable-diffusion-xl-1": {
+ "repo": "tianweiy/DMD2",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0,
+ "timesteps": [
+ 999,
+ 749,
+ 499,
+ 249
+ ]
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": ""
+ }
+ }
+ },
+ "file_256": [
+ "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
+ "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb"
+ ]
+ }
+ },
+ "info.lora.dpo": {
+ "stable-diffusion-xl-1": {
+ "repo": "radames/sdxl-DPO-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 7.5,
+ "num_inference_steps": 4
+ },
+ "scheduler": {
+ "ops.scheduler.dpm": {
+ "algorithm_type": "sde-dpmsolver++",
+ "use_karras_sigmas": true,
+ "order": 2
+ }
+ }
+ }
+ },
+ "file_256": [
+ "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
+ "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad"
+ ]
+ }
+ },
+ "info.lora.flash": {
+ "stable-diffusion-xl-1": {
+ "repo": "jasperai/flash-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "scheduler": "ops.scheduler.lcm"
+ }
+ },
+ "file_256": [
+ "afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"
+ ]
+ },
+ "pixart-alpha": {
+ "repo": "jasperai/flash-pixart",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"
+ ]
+ },
+ "stable-diffusion-3": {
+ "repo": "jasperai/flash-sd3",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "jasperai/flash-sd",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0
+ }
+ }
+ },
+ "file_256": [
+ "99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"
+ ]
+ }
+ },
+ "info.lora.hyper": {
+ "stable-diffusion-xl-1": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ }
+ }
+ },
+ "file_256": {
+ "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {
+ "num_inference_steps": 12
+ },
+ "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {
+ "num_inference_steps": 8
+ },
+ "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {
+ "num_inference_steps": 1,
+ "guidance_scale": 0.0
+ },
+ "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "flux1-dev": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ }
+ }
+ },
+ "file_256": {
+ "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-3": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ }
+ }
+ },
+ "file_256": {
+ "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {
+ "num_inference_steps": 16
+ },
+ "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {
+ "num_inference_steps": 4
+ },
+ "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "ByteDance/Hyper-SD",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {
+ "num_inference_steps": 12
+ },
+ "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {
+ "num_inference_steps": 8
+ },
+ "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {
+ "num_inference_steps": 1,
+ "guidance_scale": 0.0
+ },
+ "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ }
+ },
+ "info.lora.lcm": {
+ "stable-diffusion-xl-1": {
+ "repo": "latent-consistency/lcm-lora-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": {
+ "timestep_spacing": "trailing"
+ }
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"
+ ]
+ },
+ "ssd": {
+ "repo": "latent-consistency/lcm-lora-ssd-1b",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"
+ ]
+ },
+ "segmind-vega": {
+ "repo": "segmind/Segmind-VegaRT",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+          "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "latent-consistency/lcm-lora-sdv1-5",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 8
+ }
+ }
+ },
+ "file_256": [
+ "8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"
+ ]
+ }
+ },
+ "info.lora.lightning": {
+ "stable-diffusion-xl-1": {
+ "repo": "ByteDance/SDXL-Lightning",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0
+ }
+ }
+ }
+ }
+ },
+ "info.lora.pcm": {
+ "stable-diffusion-xl-1": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {
+ "num_inference_steps": 16
+ },
+ "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {
+ "num_inference_steps": 16
+ },
+ "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {
+ "num_inference_steps": 4
+ },
+ "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {
+ "num_inference_steps": 4
+ },
+ "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {
+ "num_inference_steps": 8
+ },
+ "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {
+ "num_inference_steps": 8
+ },
+ "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ },
+ "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {
+ "num_inference_steps": 16
+ },
+ "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {
+ "num_inference_steps": 4
+ },
+ "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {
+ "num_inference_steps": 8
+ },
+ "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {
+ "num_inference_steps": 16,
+ "guidance_scale": 0.0
+ },
+ "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {
+ "num_inference_steps": 2,
+ "guidance_scale": 0.0
+ },
+ "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0.0
+ },
+ "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {
+ "num_inference_steps": 8,
+ "guidance_scale": 0.0
+ }
+ }
+ },
+ "stable-diffusion-3": {
+ "repo": "wangfuyun/PCM_Weights",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": {
+ "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {
+ "num_inference_steps": 2,
+ "guidance_scale": 1.2
+ },
+ "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ },
+ "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ },
+ "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1.2
+ }
+ }
+ }
+ },
+ "info.lora.slam": {
+ "stable-diffusion-xl-1": {
+ "repo": "alimama-creative/slam-lora-sdxl",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "scheduler": {
+ "ops.scheduler.lcm": {
+ "timestep_spacing": "trailing"
+ }
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 1
+ }
+ }
+ },
+ "file_256": [
+ "22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "alimama-creative/slam-sd1.5",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ }
+ }
+ },
+ "info.lora.spo": {
+ "stable-diffusion-xl-1": {
+ "repo": "SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 5.0
+ }
+ }
+ },
+ "file_256": [
+ "0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "guidance_scale": 7.5
+ }
+ }
+ },
+ "file_256": [
+ "1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"
+ ]
+ }
+ },
+ "info.lora.tcd": {
+ "stable-diffusion-xl-1": {
+ "repo": "h1t/TCD-SDXL-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ },
+ "generation": {
+ "num_inference_steps": 4,
+ "guidance_scale": 0,
+ "eta": 0.3
+ },
+ "scheduler": {
+ "ops.scheduler.tcd": {}
+ }
+ }
+ },
+ "file_256": [
+ "2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "h1t/TCD-SD15-LoRA",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {}
+ }
+ }
+ },
+ "file_256": [
+ "eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"
+ ],
+ "layer_256": [
+ "e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"
+ ],
+ "layer_b3": [
+ "90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"
+ ]
+ }
+ },
+ "info.lora.turbo": {
+ "stable-diffusion-xl-1": {
+ "file_256": [
+ "a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"
+ ]
+ },
+ "flux1-dev": {
+ "repo": "alimama-creative/FLUX.1-Turbo-Alpha",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 0.125
+ }
+ },
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 8,
+ "max_sequence_length": 512
+ }
+ }
+ },
+ "file_256": [
+ "77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"
+ ]
+ },
+ "stable-diffusion-3": {
+ "repo": "tensorart/stable-diffusion-3.5-large-TurboX",
+ "pkg": {
+ "0": {
+ "diffusers": {
+ "load_lora_weights": {
+ "fuse": 1.0
+ }
+ },
+ "scheduler": {
+            "ops.scheduler.flowmatcheuler": {
+ "shift": 5
+ }
+ }
+ }
+ },
+ "file_256": {
+ "fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {
+          "num_inference_steps": 8
+ }
+ }
+ }
+ },
+ "info.art.audiogen": {
+ "*": {
+ "repo": "facebook/audiogen-medium",
+ "pkg": {
+ "0": {
+ "audiocraft": "models.AudioGen",
+ "generation": {
+ "duration": 5
+ },
+ "stage_2": {
+            "audiocraft": ".data.audio.audio_write",
+ "generation": {
+ "strategy": "loudness",
+ "loudness_compressor": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "info.art.parler-tts-v1": {
+ "*": {
+ "repo": "parler-tts/parler-tts-large-v1",
+ "pkg": {
+ "0": {
+ "parler_tts": "ParlerTTSForConditionalGeneration",
+ "generation": {
+ "return_tensors": "pt"
+ }
+ }
+ }
+ }
+ },
+ "info.gan.snac-st": {
+ "*": {
+ "repo": "Zuellni/snac-24khz-ST",
+ "pkg": {
+ "0": {
+ "snac": "SNAC"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio"
+ }
+ },
+ "file_256": [
+ "e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be",
+ "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"
+ ],
+ "layer_256": [
+ "35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300",
+ "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"
+ ],
+ "layer_b3": [
+ "18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46",
+ "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"
+ ]
+ }
+ },
+ "info.gan.kokoro": {
+ "*": {
+ "repo": "hexgrad/Kokoro-82M",
+ "pkg": {
+ "0": {
+ "kokoro": "KPipeline"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ },
+ "file_256": [
+ "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
+ "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
+ ],
+ "layer_256": [
+ "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
+ "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b"
+ ],
+ "layer_b3": [
+ "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
+ "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d",
+        "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e93",
+        "2dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92"
+ ]
+ }
+ },
+ "info.stst.silero-vad": {
+ "*": {
+ "repo": "freddyaboulton/silero-vad",
+ "pkg": {
+ "0": {
+ "onnx": "onnx"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ },
+ "file_256": [
+ "591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"
+ ],
+ "layer_256": [
+ "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd"
+ ],
+ "layer_b3": [
+ "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
+ "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
+ "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434",
+ "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e"
+ ]
+ }
+ },
+ "info.stst.wav2vec2-conformer-rope-960h-ft": {
+ "*": {
+ "repo": "facebook/wav2vec2-conformer-rope-large-960h-ft",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2ConformerForCTC"
+ }
+ },
+ "file_256": [
+ "97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"
+ ],
+ "layer_256": [
+ "1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"
+ ],
+ "layer_b3": [
+ "6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
+ ]
+ }
+ },
+ "info.art.orpheus-0-ft": {
+ "*": {
+ "repo": "canopylabs/orpheus-3b-0.1-ft",
+ "pkg": {
+ "0": {
+ "orpheus_tts": "OrpheusModel",
+ "generation": {
+ "max_model_len": 2048
+ }
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ }
+ }
+ },
+ "info.art.outetts-0": {
+ "*": {
+ "repo": "OuteAI/OuteTTS-0.3-1B",
+ "pkg": {
+ "0": {
+ "outetts": "InterfaceHF"
+ },
+ "1": {
+ "mlx_audio": "tts.generate.generate_audio",
+ "generation": {
+ "audio_format": "wav",
+ "join_audio": true,
+ "verbose": false
+ }
+ }
+ }
+ }
+ },
+ "info.gan.speecht5-hifigan": {
+ "*": {
+ "file_256": [
+ "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c"
+ ],
+ "layer_256": [
+ "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770"
+ ],
+ "layer_b3": [
+ "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956"
+ ]
+ }
+ },
+ "info.dit.wan2-flf2v-720p": {
+ "diffusers": {
+ "repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
+ "file_256": [
+ "",
+ ""
+ ],
+ "layer_256": [
+ ""
+ ],
+ "layer_b3": [
+ ""
+ ]
+ }
+ },
+ "ops.patch.hidiffusion": {
+ "stable-diffusion-xl-1": {
+ "pkg": {
+ "0": {
+ "hidiffusion": {
+ "apply_hidiffusion": {
+ "timesteps": "StableDiffusionXLTimesteps"
+ }
+ },
+ "generation": {
+ "height": 2048,
+ "width": 2048,
+ "eta": 1.0,
+ "guidance_scale": 7.5,
+ "num_inference_steps": 10
+ }
+ }
+ }
+ }
+ },
+ "ops.scheduler.align-your-steps": {
+ "stable-diffusion-xl-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "schedulers.scheduling_utils.AysSchedules",
+ "generation": {
+ "timesteps": "StableDiffusionXLTimesteps",
+ "num_inference_steps": 10
+ }
+ }
+ }
+ }
+ },
+ "info.art.chameleon": {
+ "lumina-mgpt-1024": {
+ "repo": "Alpha-VLLM/Lumina-mGPT-7B-1024",
+ "pkg": {
+ "0": {
+ "inference_solver": {
+ "FlexARInferenceSolver": {
+ "precision": "bf16",
+ "target_size": 768
+ }
+ },
+ "generation": {
+ "images": [],
+ "qas": [
+ [
+ "q1",
+ null
+ ]
+ ],
+ "max_gen_len": 8192,
+ "temperature": 1.0
+ }
+ },
+ "1": {
+ "inference_solver": "ChameleonXLLMXForConditionalGeneration"
+ }
+ },
+ "file_256": [
+ "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
+ "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec",
+ "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5",
+ "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b"
+ ],
+ "layer_256": [
+ "eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"
+ ],
+ "layer_b3": [
+ "6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"
+ ]
+ }
+ },
+ "info.vit.clip-vit-patch14": {
+ "*": {
+ "repo": "openai/clip-vit-large-patch14",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPTextModel"
+ }
+ },
+ "file_256": [
+ "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e",
+ "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17",
+ "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082",
+ "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3",
+ "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd",
+ "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1",
+ "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc",
+ "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae",
+ "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325",
+ "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5",
+ "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447",
+ "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9",
+ "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c",
+ "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
+ "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334",
+ "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e",
+ "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633",
+ "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
+ "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1"
+ ],
+ "layer_256": [
+ "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
+ "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
+ "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
+ "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
+ "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
+ "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
+ "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
+ "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
+ "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
+ "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
+ "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
+ "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471",
+ "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364",
+ "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e",
+ "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3"
+ ],
+ "layer_b3": [
+ "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
+ "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
+ "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
+ "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
+ "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
+ "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
+ "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636",
+ "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f",
+ "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3",
+ "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5",
+ "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc",
+ "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc",
+ "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f",
+ "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
+ "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
+ "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
+ ]
+ }
+ },
+ "info.vit.clip-vit-g-14-laion-s-b": {
+ "*": {
+ "repo": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPTextModelWithProjection"
+ }
+ },
+ "file_256": [
+ "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134",
+ "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4",
+ "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4",
+ "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe",
+ "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d",
+ "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5",
+ "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c",
+ "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a",
+ "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff",
+ "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b"
+ ],
+ "layer_256": [
+ "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
+ "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
+ "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc",
+ "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3",
+ "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946",
+ "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a",
+ "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46"
+ ],
+ "layer_b3": [
+ "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
+ "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
+ "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8",
+ "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2",
+ "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
+ "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
+ "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
+ ]
+ }
+ },
+ "info.vit.clip-vit-h-14-laion-s-b": {
+ "*": {
+ "repo": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPModel"
+ }
+ },
+ "file_256": [
+ "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
+ "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5",
+ "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"
+ ],
+ "layer_256": [
+ "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
+ "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061",
+ "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c"
+ ],
+ "layer_b3": [
+ "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
+ "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
+ "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
+ ]
+ }
+ },
+ "info.aet.chatglm3": {
+ "*": {
+ "repo": "zai-org/chatglm3-6b",
+ "pkg": {
+ "0": {
+ "transformers": "AutoModel"
+ }
+ },
+ "file_256": [
+ "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654",
+ "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d"
+ ],
+ "layer_256": [
+ "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44"
+ ],
+ "layer_b3": [
+ "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481"
+ ]
+ }
+ },
+ "info.art.qwen2": {
+ "bagel-mot": {
+ "repo": "ByteDance-Seed/BAGEL-7B-MoT",
+ "pkg": {
+ "0": {
+ "Bagel": "app"
+ }
+ }
+ }
+ },
+ "info.vae.tae": {
+ "stable-diffusion-3": {
+ "repo": "madebyollin/taesd3",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"
+ ]
+ },
+ "stable-diffusion-xl-1": {
+ "repo": "madebyollin/taesdxl",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "repo": "madebyollin/taesd",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"
+ ]
+ },
+ "flux1-dev": {
+ "repo": "madebyollin/taef1",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderTiny"
+ }
+ },
+ "file_256": [
+ "927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"
+ ]
+ }
+ },
+ "info.vae.kl": {
+ "qwen-image": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLQwenImage"
+ }
+ },
+ "file_256": [
+ "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344"
+ ],
+ "layer_256": [
+ "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc"
+ ],
+ "layer_b3": [
+ "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b"
+ ]
+ },
+ "ltx-video": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLLTXVideo"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "allegro": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLAllegro"
+ }
+ },
+ "file_256": [
+ "47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"
+ ],
+ "layer_256": [
+ "bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"
+ ],
+ "layer_b3": [
+ "93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"
+ ]
+ },
+ "cosmos-1-diffusion-video2world": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLCosmos"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "easyanimatev5-zh": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLMagvit"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "hunyuanvideo-i2v": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLHunyuanVideo"
+ }
+ },
+ "file_256": [
+ "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818",
+ "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
+ "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
+ "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0"
+ ]
+ },
+ "mochi-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLMochi"
+ }
+ },
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "audioldm-s-v2": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"
+ ],
+ "layer_256": [
+ "54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"
+ ],
+ "layer_b3": [
+ "00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"
+ ]
+ },
+ "stable-video-diffusion-img2vid-xt": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLTemporalDecoder"
+ }
+ }
+ },
+ "stable-diffusion-xl-1": {
+ "repo": "madebyollin/sdxl-vae-fp16-fix",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
+ "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c",
+ "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646",
+ "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48",
+ "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68",
+ "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341",
+ "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4",
+ "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88"
+ ],
+ "layer_256": [
+ "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
+ "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
+ ],
+ "layer_b3": [
+ "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74",
+ "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0"
+ ]
+ },
+ "stable-diffusion-xl-1*": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
+ "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a",
+ "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1",
+ "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff"
+ ],
+ "layer_256": [
+ "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
+ "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
+ ],
+ "layer_b3": [
+ "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74"
+ ]
+ },
+ "shuttle-jaguar": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904"
+ ],
+ "layer_256": [
+ "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b"
+ ],
+ "layer_b3": [
+ "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
+ "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0"
+ ]
+ },
+ "flux1-dev": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38",
+ "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3",
+ "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94",
+ "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc"
+ ],
+ "layer_256": [
+ "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
+ "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216",
+ "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004",
+ "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80"
+ ],
+ "layer_b3": [
+ "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17",
+ "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903"
+ ]
+ },
+ "musicldm": {
+ "file_256": [
+ "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4"
+ ],
+ "layer_256": [
+ "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f"
+ ],
+ "layer_b3": [
+ "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0"
+ ]
+ },
+ "stable-diffusion-v1-5": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ },
+ "file_256": [
+ "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520",
+ "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a",
+ "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46",
+ "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab",
+ "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75",
+ "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815",
+ "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c",
+ "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c"
+ ],
+ "layer_256": [
+ "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd",
+ "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3"
+ ],
+ "layer_b3": [
+ "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e",
+ "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833"
+ ]
+ }
+ },
+ "info.vae.wan": {
+ "wan2-i2v-480p": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLWan",
+ "precision": "ops.precision.float.F32"
+ }
+ },
+ "file_256": [
+ "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793",
+ "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"
+ ],
+ "layer_256": [
+ "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
+ "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e"
+ ],
+ "layer_b3": [
+ "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010",
+ "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433"
+ ]
+ },
+ "skyreels-v2-t2v-720p": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "skyreels-v2-i2v-720p": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ }
+ },
+ "info.vae.cogvideox": {
+ "cogvideox-i2v": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKLCogVideoX"
+ }
+ },
+ "file_256": [
+ "a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"
+ ],
+ "layer_256": [
+ "43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"
+ ],
+ "layer_b3": [
+ "246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"
+ ]
+ },
+ "cogvideox-fun-v-pose": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ },
+ "consisid": {
+ "file_256": [],
+ "layer_256": [],
+ "layer_b3": []
+ }
+ },
+ "info.vae.dc": {
+ "sana-1024px-bf16": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderDC"
+ }
+ },
+ "file_256": [
+ "15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"
+ ],
+ "layer_256": [
+ "abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"
+ ],
+ "layer_b3": [
+ "cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"
+ ]
+ }
+ },
+ "info.vae.oobleck": {
+ "stable-audio-open-1": {
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderOobleck"
+ }
+ }
+ }
+ },
+ "info.vae.eq": {
+ "stable-diffusion-xl-1": {
+ "repo": "KBlueLeaf/EQ-SDXL-VAE",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ }
+ }
+ },
+ "info.vae.ms-lc-eq": {
+ "stable-diffusion-xl-1": {
+ "repo": "Anzhc/MS-LC-EQ-D-VR_VAE",
+ "pkg": {
+ "0": {
+ "diffusers": "AutoencoderKL"
+ }
+ }
+ }
+ }
}
\ No newline at end of file
diff --git a/mir/spec/mir.py b/mir/spec/__init__.py
similarity index 100%
rename from mir/spec/mir.py
rename to mir/spec/__init__.py
diff --git a/mir/spec/docstring_patterns.json b/mir/spec/docstring_patterns.json
new file mode 100644
index 0000000..691ab3c
--- /dev/null
+++ b/mir/spec/docstring_patterns.json
@@ -0,0 +1,40 @@
+{
+    "uncommon_naming": {
+        "blip_diffusion": "blip_diffusion",
+        "cogvideo": "cogvideox",
+        "cogview3": "cogview3plus",
+        "deepfloyd_if": "if",
+        "cosmos": "cosmos2_text2image",
+        "visualcloze": "visualcloze_generation",
+        "marigold": "marigold_depth"
+    },
+    "exclusion_list": [
+        "auto_pipeline",
+        "consistency_models",
+        "pipeline_utils",
+        "deprecated",
+        "ddim",
+        "ddpm",
+        "autopipeline",
+        "dance_diffusion",
+        "diffusionpipeline",
+        "dit",
+        "latent_consistency_models",
+        "latent_diffusion",
+        "ledits_pp",
+        "pag",
+        "paint_by_example",
+        "semantic_stable_diffusion",
+        "stable_diffusion_attend_and_excite",
+        "stable_diffusion_diffedit",
+        "stable_diffusion_k_diffusion",
+        "stable_diffusion_panorama",
+        "stable_diffusion_safe",
+        "stable_diffusion_sag",
+        "t2i_adapter",
+        "text_to_video_synthesis",
+        "unclip",
+        "unidiffuser",
+        "controlnet_hunyuandit"
+    ]
+}
\ No newline at end of file
diff --git a/mir/spec/missing_params.json b/mir/spec/missing_params.json
new file mode 100644
index 0000000..de3dc44
--- /dev/null
+++ b/mir/spec/missing_params.json
@@ -0,0 +1,62 @@
+{
+ "bark": {
+ "repo_path": "suno/bark",
+ "params": {
+ "n_head": [
+ ""
+ ]
+ }
+ },
+ "aria_text": {
+ "repo_path": "rhymes-ai/Aria-Chat",
+ "params": {
+ "vision_config": [
+ ""
+ ],
+ "text_config": [
+ ""
+ ]
+ }
+ },
+ "cwm": {
+ "repo_path": "facebook/cwm",
+ "params": {
+ "n_head": [
+ ""
+ ]
+ }
+ },
+ "decision_transformer": {
+ "repo_path": "edbeeching/decision-transformer-gym-hopper-medium"
+ },
+ "distilbert": {
+ "repo_path": "distilbert-base-uncased"
+ },
+ "gpt_bigcode": {
+ "repo_path": "bigcode/gpt_bigcode-santacoder"
+ },
+ "granite": {
+ "repo_path": "ibm-granite/granite-3.3-2b-base"
+ },
+ "granitemoe": {
+ "repo_path": "ibm-research/PowerMoE-3b"
+ },
+ "granitemoehybrid": {
+ "repo_path": "ibm-granite/granite-4.0-h-small"
+ },
+ "musicgen": {
+ "repo_path": "facebook/musicgen-small"
+ },
+ "seamless_m4t_v2": {
+ "repo_path": "facebook/seamless-m4t-v2-large"
+ },
+ "timm_backbone": {
+ "repo_path": "microsoft/resnet-50"
+ },
+ "timm_wrapper": {
+ "repo_path": "timm/resnet18.a1_in1k"
+ },
+ "vision-text-dual-encoder": {
+ "repo_path": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
+ }
+}
\ No newline at end of file
diff --git a/mir/spec/repo_migrations.json b/mir/spec/repo_migrations.json
new file mode 100644
index 0000000..799f906
--- /dev/null
+++ b/mir/spec/repo_migrations.json
@@ -0,0 +1,29 @@
+{
+ "/helium-2b": "/helium-1-2b",
+ "allenai/Olmo2-7B-1124-hf": "allenai/Olmo-2-1124-7B",
+ "apple/mobilevitv2-1.0": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "caidas/swin2SR-classical-sr-x2-64": "caidas/swin2SR-classical-sr-x2-64",
+ "facebook/hiera-base-224": "facebook/hiera-base-224-hf",
+ "facebook/sam_hq-vit-huge": "syscv-community/sam-hq-vit-huge",
+ "facebook/vit_msn_base": "facebook/vit-msn-base",
+ "facebook/wav2vec2-bert-rel-pos-large": "facebook/w2v-bert-2.0",
+ "google/gemma-3-4b": "google/gemma-3-4b-it",
+ "google/gemma2-7b": "google/gemma-2-9b",
+ "google/gemma3_text-7b": "google/gemma-3-12b-it",
+ "IDEA-Research/dab_detr-base": "IDEA-Research/dab-detr-resnet-50",
+ "LGAI-EXAONE/EXAONE-4.0-Instruct": "LGAI-EXAONE/EXAONE-4.0-32B",
+    "meta/chameleon-7b": "facebook/chameleon-7b",
+ "mixtralai/Mixtral-8x7B": "mistralai/Mixtral-8x7B-v0.1",
+ "paligemma-hf/paligemma-2b": "google/paligemma2-3b-mix-224",
+ "pixtral-hf/pixtral-9b": "mistralai/Pixtral-12B-Base-2409",
+ "Qwen/Qwen2-7B-beta": "Qwen/Qwen2-7B",
+ "Qwen/Qwen3-15B-A2B": "Qwen/Qwen3-30B-A3B",
+ "s-JoL/Open-Llama-V1": "openlm-research/open_llama_3b",
+ "Salesforce/instruct-blip-flan-t5": "Salesforce/instructblip-flan-t5-xl",
+ "state-spaces/mamba2-2.8b": "AntonV/mamba2-2.7b-hf",
+ "ibm-fms/FalconH1-9.8b-2.2T-hf": "tiiuae/Falcon-H1-34B-Instruct",
+ "nvidia/nemotron-3-8b-base-4k-hf": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
+ "THUDM/": "zai-org/",
+ "THUDM/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
+ "zai-org/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air"
+}
\ No newline at end of file
diff --git a/mir/spec/template.json b/mir/spec/template.json
index 96fc4de..1381e19 100644
--- a/mir/spec/template.json
+++ b/mir/spec/template.json
@@ -51,6 +51,7 @@
"projection_dim",
"vlm_config",
"crop_size",
+ "fpn_hidden_size",
"out_indices",
"logit_scale_init_value",
"image_size",
@@ -77,7 +78,10 @@
"keypoint_detector_config",
"local_attention",
"act_dropout",
- "max_source_positions"
+ "max_source_positions",
+ "classifier_pooling",
+ "audio_video_config",
+ "video_config"
],
"stst": [
"is_encoder_decoder",
@@ -85,12 +89,17 @@
"encoder_layers",
"encoder_hidden_size",
"encoder_config",
+ "ctc_loss_reduction",
+ "ctc_zero_infinity",
"audio_token_index",
"codebook_dim",
"router_ignore_padding_tokens",
"d_ff",
"d_kv",
- "audio_config"
+ "audio_config",
+ "convolution_bias",
+ "rope_parameters",
+ "hotstart_dup_thresh"
],
"art": [
"ffn_dim",
@@ -98,6 +107,7 @@
"vq_config",
"attn_config",
"n_head",
+ "act_dim",
"n_heads",
"n_layer",
"rms_norm_eps",
@@ -106,9 +116,12 @@
"layernorm_embedding",
"hidden_dropout_prob",
"rotary_pct",
+ "audio_encoder",
"embed_dropout",
"nb_priors",
+ "resid_pdrop",
"embd_pdrop",
+ "action_tanh",
"n_positions",
"aux_loss_coef",
"residual_dropout",
diff --git a/mir/tag.py b/mir/tag.py
index e869ad3..7b272fe 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -1,11 +1,11 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import List
-from mir.config.constants import PARAMETERS_SUFFIX, BREAKING_SUFFIX
+from typing import Any
+from mir.config.constants import PARAMETERS_SUFFIX, BREAKING_SUFFIX, ClassMapEntry
-def make_mir_tag(repo_title: str, decoder=False, data: dict = None) -> List[str]:
+def tag_model_from_repo(repo_title: str, decoder=False, data: dict | None = None) -> tuple[str, Any]:
"""Create a mir label from a repo path\n
:param mir_prefix: Known period-separated prefix and model type
:param repo_path: Typical remote source repo path, A URL without domain
@@ -45,7 +45,7 @@ def make_mir_tag(repo_title: str, decoder=False, data: dict = None) -> List[str]
return (cleaned_string, suffix)
-def make_scheduler_tag(series_name: str) -> tuple[str]:
+def tag_scheduler(series_name: str) -> tuple[str, str]:
"""Create a mir label from a scheduler operation\n
:param class_name: Known period-separated prefix and model type
:return: The assembled mir tag with compatibility pre-separated"""
@@ -64,23 +64,43 @@ def make_scheduler_tag(series_name: str) -> tuple[str]:
for pattern in patterns:
series_name = re.sub(pattern, "", series_name)
series_name.lower()
- # if not comp_name:
- # comp_name = "*"
+ assert series_name is not None
+ assert comp_name is not None
return series_name, comp_name
-def tag_base_model(repo_path: str, class_name: str, addendum: dict | None = None) -> tuple[str]:
+def mir_prefix_from_forward_pass(transformers: bool = False, **kwargs):
+ """Set type of MIR prefix depending on model type\n
+ :param transformers: Use transformers data instead of diffusers data, defaults to False
+ :raises ValueError: Model type not detected
+ :return: MIR prefix based on model configuration"""
+ from mir.config.json_io import read_json_file
+
+ data = read_json_file("mir/spec/template.json")
+
+ if transformers:
+ flags = data["arch"]["transformer"] # pylint:disable=unsubscriptable-object
+ else:
+ flags = data["arch"]["diffuser"] # pylint:disable=unsubscriptable-object
+ for mir_prefix, key_match in flags.items():
+ if any(kwargs.get(param, None) for param in key_match):
+ return mir_prefix
+ return None
+
+
+def tag_base_model(repo_path: str, class_name: str, addendum: dict | None = None) -> tuple[str, str, str | dict[str, dict]]:
"""Convert model repo paths to MIR tags, classifying by feature\n
:param name: Repo path
:param class_name: The HF transformers class for the model
:return: A segmented MIR tag useful for appending index entries"""
from mir.inspect.classes import extract_init_params
- from mir.indexers import flag_config
- annotations = extract_init_params(class_name.replace("Model", "Config"), "transformers")
- mir_prefix = flag_config(transformers=True, **annotations)
- base_series, base_comp = make_mir_tag(repo_path)
+ annotations = extract_init_params(class_name.replace("Model", "Config"), "transformers") # remove default annotations from python
+ if not annotations:
+        raise TypeError("No model type returned")
+ mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
+ base_series, base_comp = tag_model_from_repo(repo_path)
if not addendum:
return mir_prefix, base_series, base_comp
else:
@@ -102,6 +122,28 @@ def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
return mir_prefix, mir_series, {mir_comp: addendum}
+def mir_tag_from_config(class_map: ClassMapEntry, repo_path: str) -> tuple[str, str, str]:
+ """Change a transformers config class into a MIR series and comp
+ :param class_map: Transformers class information extracted from dependency"""
+
+ mir_prefix = mir_prefix_from_forward_pass(transformers=True, **class_map.config_params)
+ if not mir_prefix:
+ if class_map.model_params:
+ if mir_prefix := mir_prefix_from_forward_pass(transformers=True, **class_map.model_params):
+ pass
+ else:
+ raise ValueError(f"Unable to determine MIR prefix from {class_map, repo_path}")
+ else:
+ raise ValueError(f"Unrecognized model type, no tag matched {class_map.name} with {class_map.config_params} or {class_map.model_params}")
+ mir_prefix = "info." + mir_prefix
+ if class_map.name != "funnel":
+ mir_suffix, mir_comp = tag_model_from_repo(repo_path)
+ else:
+ mir_suffix, mir_comp = ["funnel", "*"]
+ mir_series = mir_prefix + "." + mir_suffix
+ return mir_series, mir_comp, mir_suffix
+
+
# def tag_mlx_model(repo_path: str, class_name: str, addendum: dict) -> tuple[str]:
# dev_series, dev_comp = make_mir_tag("black-forest-labs/FLUX.1-dev")
# schnell_series, schnell_comp = make_mir_tag("black-forest-labs/FLUX.1-schnell")
diff --git a/pyproject.toml b/pyproject.toml
index 3f4f11e..580736e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -25,8 +25,10 @@ classifiers = [
]
dependencies = [
"diffusers>=0.35.2",
+ "ftfy>=6.3.1",
"huggingface-hub[hf-xet]>=1.1.7",
"pydantic>=2.12.5",
+ "sentencepiece>=0.2.1",
"tokenizers>=0.22.1",
"torch>=2.9.1",
"torchvision>=0.24.1",
diff --git a/tests/test_find_docstring_run.py b/tests/test_find_docstring_run.py
new file mode 100644
index 0000000..952c5a5
--- /dev/null
+++ b/tests/test_find_docstring_run.py
@@ -0,0 +1,5 @@
+from mir.inspect.metadata import find_diffusers_docstrings
+from pprint import pprint
+
+find_diffusers_docstrings()
+list(find_diffusers_docstrings())
diff --git a/tests/test_gather_diffusers_metadata.py b/tests/test_gather_diffusers_metadata.py
index efbed0a..e628720 100644
--- a/tests/test_gather_diffusers_metadata.py
+++ b/tests/test_gather_diffusers_metadata.py
@@ -28,14 +28,14 @@ def mock_pkgutil_iter_modules(mocker):
def test_list_diffusers_models():
- from mir.inspect.metadata import gather_diffusers_metadata
+ from mir.inspect.metadata import find_diffusers_docstrings
- gather_diffusers_metadata()
+ find_diffusers_docstrings()
-def test_gather_diffusers_metadata_excluded(mock_import_module, mock_pkgutil_iter_modules):
+def test_find_docstrings_excluded(mock_import_module, mock_pkgutil_iter_modules):
"""Test that excluded modules are not processed."""
- from mir.inspect.metadata import gather_diffusers_metadata
+ from mir.inspect.metadata import find_diffusers_docstrings
excluded_modules = ["ddpm"]
@@ -45,5 +45,5 @@ def side_effect(import_name, *args, **kwargs):
return Mock()
mock_import_module.side_effect = side_effect
- results = list(gather_diffusers_metadata()) # type: ignore # noqa
+ results = list(find_diffusers_docstrings()) # type: ignore # noqa
assert not any("ddpm" in call_arg[0][0] for call_arg in mock_import_module.call_args_list)
diff --git a/tests/test_mir_db_create_restore.py b/tests/test_mir_db_create_restore.py
index 3aee25b..b927cb0 100644
--- a/tests/test_mir_db_create_restore.py
+++ b/tests/test_mir_db_create_restore.py
@@ -7,7 +7,7 @@
# def test_mir_creation():
-# from mir.spec.mir import mir_entry
+# from mir.spec import mir_entry
# from pprint import pprint
# os.remove(MIR_PATH_NAMED)
diff --git a/tests/test_mir_tagging.py b/tests/test_mir_tagging.py
index ac97c02..272f157 100644
--- a/tests/test_mir_tagging.py
+++ b/tests/test_mir_tagging.py
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from mir.tag import make_mir_tag
+from mir.tag import tag_model_from_repo
# def test_param_no_delimiter_version():BAH
@@ -10,7 +10,7 @@
def test_split_hyphenated():
- result = make_mir_tag("xyz-15b")
+ result = tag_model_from_repo("xyz-15b")
assert result == ("xyz", "*")
print(result)
@@ -21,24 +21,24 @@ def test_split_hyphenated():
def test_split_dot_version():
- assert make_mir_tag("xyz1.0") == ("xyz1", "*")
+ assert tag_model_from_repo("xyz1.0") == ("xyz1", "*")
def test_split_hyphen_version():
- assert make_mir_tag("xyz1-0") == ("xyz1-0", "*")
+ assert tag_model_from_repo("xyz1-0") == ("xyz1-0", "*")
def test_split_hyphen_v_version():
- assert make_mir_tag("xyzv1-0") == ("xyzv1-0", "*")
+ assert tag_model_from_repo("xyzv1-0") == ("xyzv1-0", "*")
def test_no_split():
- assert make_mir_tag("flux.1-dev") == ("flux1-dev", "*")
+ assert tag_model_from_repo("flux.1-dev") == ("flux1-dev", "*")
def test_no_split_again():
- assert make_mir_tag("blipdiffusion") == ("blipdiffusion", "*")
+ assert tag_model_from_repo("blipdiffusion") == ("blipdiffusion", "*")
def test_no_version_dot_numeric_and_diffusers():
- assert make_mir_tag("EasyAnimateV5.1-7b-zh-diffusers") == ("easyanimatev5-zh", "diffusers")
+ assert tag_model_from_repo("EasyAnimateV5.1-7b-zh-diffusers") == ("easyanimatev5-zh", "diffusers")
diff --git a/tests/test_regex_constants.py b/tests/test_regex_constants.py
index b148c2d..70820a8 100644
--- a/tests/test_regex_constants.py
+++ b/tests/test_regex_constants.py
@@ -2,7 +2,7 @@
#
from mir.config.constants import PARAMETERS_SUFFIX
-from mir.tag import make_mir_tag
+from mir.tag import tag_model_from_repo
def test_constants():
@@ -23,5 +23,5 @@ def test_constants():
}
# regex = PARAMETERS_SUFFIX
for test, expected in data_tests.items():
- mir_tag = list(make_mir_tag(test))
+ mir_tag = list(tag_model_from_repo(test))
assert mir_tag == expected
diff --git a/uv.lock b/uv.lock
index 0eec34b..c945785 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
version = 1
-revision = 2
+revision = 3
requires-python = ">=3.11"
resolution-markers = [
"python_full_version >= '3.12'",
@@ -30,11 +30,11 @@ wheels = [
[[package]]
name = "certifi"
-version = "2025.11.12"
+version = "2026.1.4"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/a2/8c/58f469717fa48465e4a50c014a0400602d3c437d7c0c468e17ada824da3a/certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316", size = 160538, upload-time = "2025-11-12T02:54:51.517Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/70/7d/9bc192684cea499815ff478dfcdc13835ddf401365057044fb721ec6bddb/certifi-2025.11.12-py3-none-any.whl", hash = "sha256:97de8790030bbd5c2d96b7ec782fc2f7820ef8dba6db909ccf95449f2d062d4b", size = 159438, upload-time = "2025-11-12T02:54:49.735Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
[[package]]
@@ -133,8 +133,8 @@ wheels = [
[[package]]
name = "diffusers"
-version = "0.36.0.dev0"
-source = { git = "https://github.com/huggingface/diffusers#a1f36ee3ef4ae1bf98bd260e539197259aa981c1" }
+version = "0.37.0.dev0"
+source = { git = "https://github.com/huggingface/diffusers#5ffb65803d0ddc5e3298c35df638ceed5e580922" }
dependencies = [
{ name = "filelock" },
{ name = "httpx" },
@@ -149,11 +149,11 @@ dependencies = [
[[package]]
name = "filelock"
-version = "3.20.0"
+version = "3.20.2"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/58/46/0028a82567109b5ef6e4d2a1f04a583fb513e6cf9527fcdd09afd817deeb/filelock-3.20.0.tar.gz", hash = "sha256:711e943b4ec6be42e1d4e6690b48dc175c822967466bb31c0c293f34334c13f4", size = 18922, upload-time = "2025-10-08T18:03:50.056Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/c1/e0/a75dbe4bca1e7d41307323dad5ea2efdd95408f74ab2de8bd7dba9b51a1a/filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64", size = 19510, upload-time = "2026-01-02T15:33:32.582Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/76/91/7216b27286936c16f5b4d0c530087e4a54eead683e6b0b73dd0c64844af6/filelock-3.20.0-py3-none-any.whl", hash = "sha256:339b4732ffda5cd79b13f4e2711a31b0365ce445d95d243bb996273d072546a2", size = 16054, upload-time = "2025-10-08T18:03:48.35Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/30/ab407e2ec752aa541704ed8f93c11e2a5d92c168b8a755d818b74a3c5c2d/filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8", size = 16697, upload-time = "2026-01-02T15:33:31.133Z" },
]
[[package]]
@@ -165,6 +165,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/51/c7/b64cae5dba3a1b138d7123ec36bb5ccd39d39939f18454407e5468f4763f/fsspec-2025.12.0-py3-none-any.whl", hash = "sha256:8bf1fe301b7d8acfa6e8571e3b1c3d158f909666642431cc78a1b7b4dbc5ec5b", size = 201422, upload-time = "2025-12-03T15:23:41.434Z" },
]
+[[package]]
+name = "ftfy"
+version = "6.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "wcwidth" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/a5/d3/8650919bc3c7c6e90ee3fa7fd618bf373cbbe55dff043bd67353dbb20cd8/ftfy-6.3.1.tar.gz", hash = "sha256:9b3c3d90f84fb267fe64d375a07b7f8912d817cf86009ae134aa03e1819506ec", size = 308927, upload-time = "2024-10-26T00:50:35.149Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/ab/6e/81d47999aebc1b155f81eca4477a616a70f238a2549848c38983f3c22a82/ftfy-6.3.1-py3-none-any.whl", hash = "sha256:7c70eb532015cd2f9adb53f101fb6c7945988d023a085d127d1573dc49dd0083", size = 44821, upload-time = "2024-10-26T00:50:33.425Z" },
+]
+
[[package]]
name = "h11"
version = "0.16.0"
@@ -233,7 +245,7 @@ wheels = [
[[package]]
name = "huggingface-hub"
-version = "1.1.7"
+version = "1.2.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock" },
@@ -247,9 +259,9 @@ dependencies = [
{ name = "typer-slim" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/6f/fa/a1a94c55637f2b7cfeb05263ac3881aa87c82df92d8b4b31c909079f4419/huggingface_hub-1.1.7.tar.gz", hash = "sha256:3c84b6283caca928595f08fd42e9a572f17ec3501dec508c3f2939d94bfbd9d2", size = 607537, upload-time = "2025-12-01T11:05:28.137Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a7/c8/9cd2fcb670ba0e708bfdf95a1177b34ca62de2d3821df0773bc30559af80/huggingface_hub-1.2.3.tar.gz", hash = "sha256:4ba57f17004fd27bb176a6b7107df579865d4cde015112db59184c51f5602ba7", size = 614605, upload-time = "2025-12-12T15:31:42.161Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/dd/4f/82e5ab009089a2c48472bf4248391fe4091cf0b9c3e951dbb8afe3b23d76/huggingface_hub-1.1.7-py3-none-any.whl", hash = "sha256:f3efa4779f4890e44c957bbbb0f197e6028887ad09f0cf95a21659fa7753605d", size = 516239, upload-time = "2025-12-01T11:05:25.981Z" },
+ { url = "https://files.pythonhosted.org/packages/df/8d/7ca723a884d55751b70479b8710f06a317296b1fa1c1dec01d0420d13e43/huggingface_hub-1.2.3-py3-none-any.whl", hash = "sha256:c9b7a91a9eedaa2149cdc12bdd8f5a11780e10de1f1024718becf9e41e5a4642", size = 520953, upload-time = "2025-12-12T15:31:40.339Z" },
]
[package.optional-dependencies]
@@ -268,14 +280,14 @@ wheels = [
[[package]]
name = "importlib-metadata"
-version = "8.7.0"
+version = "8.7.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "zipp" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/5e/f8e9a1d23b9c20a551a8a02ea3637b4642e22c2626e3a13a9a29cdea99eb/importlib_metadata-8.7.1-py3-none-any.whl", hash = "sha256:5a1f80bf1daa489495071efbb095d75a634cf28a8bc299581244063b53176151", size = 27865, upload-time = "2025-12-21T10:00:18.329Z" },
]
[[package]]
@@ -388,8 +400,10 @@ version = "0.0.1"
source = { editable = "." }
dependencies = [
{ name = "diffusers" },
+ { name = "ftfy" },
{ name = "huggingface-hub", extra = ["hf-xet"] },
{ name = "pydantic" },
+ { name = "sentencepiece" },
{ name = "tokenizers" },
{ name = "torch" },
{ name = "torchvision" },
@@ -409,8 +423,10 @@ dev = [
[package.metadata]
requires-dist = [
{ name = "diffusers", git = "https://github.com/huggingface/diffusers" },
+ { name = "ftfy", specifier = ">=6.3.1" },
{ name = "huggingface-hub", extras = ["hf-xet"], specifier = ">=1.1.7" },
{ name = "pydantic", specifier = ">=2.12.5" },
+ { name = "sentencepiece", specifier = ">=0.2.1" },
{ name = "tokenizers", specifier = ">=0.22.1" },
{ name = "torch", specifier = ">=2.9.1" },
{ name = "torchvision", specifier = ">=0.24.1" },
@@ -438,101 +454,99 @@ wheels = [
[[package]]
name = "networkx"
-version = "3.6"
+version = "3.6.1"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/e8/fc/7b6fd4d22c8c4dc5704430140d8b3f520531d4fe7328b8f8d03f5a7950e8/networkx-3.6.tar.gz", hash = "sha256:285276002ad1f7f7da0f7b42f004bcba70d381e936559166363707fdad3d72ad", size = 2511464, upload-time = "2025-11-24T03:03:47.158Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/6a/51/63fe664f3908c97be9d2e4f1158eb633317598cfa6e1fc14af5383f17512/networkx-3.6.1.tar.gz", hash = "sha256:26b7c357accc0c8cde558ad486283728b65b6a95d85ee1cd66bafab4c8168509", size = 2517025, upload-time = "2025-12-08T17:02:39.908Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/07/c7/d64168da60332c17d24c0d2f08bdf3987e8d1ae9d84b5bbd0eec2eb26a55/networkx-3.6-py3-none-any.whl", hash = "sha256:cdb395b105806062473d3be36458d8f1459a4e4b98e236a66c3a48996e07684f", size = 2063713, upload-time = "2025-11-24T03:03:45.21Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/c9/b2622292ea83fbb4ec318f5b9ab867d0a28ab43c5717bb85b0a5f6b3b0a4/networkx-3.6.1-py3-none-any.whl", hash = "sha256:d47fbf302e7d9cbbb9e2555a0d267983d2aa476bac30e90dfbe5669bd57f3762", size = 2068504, upload-time = "2025-12-08T17:02:38.159Z" },
]
[[package]]
name = "nodeenv"
-version = "1.9.1"
+version = "1.10.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/43/16/fc88b08840de0e0a72a2f9d8c6bae36be573e475a6326ae854bcc549fc45/nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f", size = 47437, upload-time = "2024-06-04T18:44:11.171Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/24/bf/d1bda4f6168e0b2e9e5958945e01910052158313224ada5ce1fb2e1113b8/nodeenv-1.10.0.tar.gz", hash = "sha256:996c191ad80897d076bdfba80a41994c2b47c68e224c542b48feba42ba00f8bb", size = 55611, upload-time = "2025-12-20T14:08:54.006Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/d2/1d/1b658dbd2b9fa9c4c9f32accbfc0205d532c8c6194dc0f2a4c0428e7128a/nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9", size = 22314, upload-time = "2024-06-04T18:44:08.352Z" },
+ { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" },
]
[[package]]
name = "numpy"
-version = "2.3.5"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/76/65/21b3bc86aac7b8f2862db1e808f1ea22b028e30a225a34a5ede9bf8678f2/numpy-2.3.5.tar.gz", hash = "sha256:784db1dcdab56bf0517743e746dfb0f885fc68d948aba86eeec2cba234bdf1c0", size = 20584950, upload-time = "2025-11-16T22:52:42.067Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/43/77/84dd1d2e34d7e2792a236ba180b5e8fcc1e3e414e761ce0253f63d7f572e/numpy-2.3.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:de5672f4a7b200c15a4127042170a694d4df43c992948f5e1af57f0174beed10", size = 17034641, upload-time = "2025-11-16T22:49:19.336Z" },
- { url = "https://files.pythonhosted.org/packages/2a/ea/25e26fa5837106cde46ae7d0b667e20f69cbbc0efd64cba8221411ab26ae/numpy-2.3.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:acfd89508504a19ed06ef963ad544ec6664518c863436306153e13e94605c218", size = 12528324, upload-time = "2025-11-16T22:49:22.582Z" },
- { url = "https://files.pythonhosted.org/packages/4d/1a/e85f0eea4cf03d6a0228f5c0256b53f2df4bc794706e7df019fc622e47f1/numpy-2.3.5-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:ffe22d2b05504f786c867c8395de703937f934272eb67586817b46188b4ded6d", size = 5356872, upload-time = "2025-11-16T22:49:25.408Z" },
- { url = "https://files.pythonhosted.org/packages/5c/bb/35ef04afd567f4c989c2060cde39211e4ac5357155c1833bcd1166055c61/numpy-2.3.5-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:872a5cf366aec6bb1147336480fef14c9164b154aeb6542327de4970282cd2f5", size = 6893148, upload-time = "2025-11-16T22:49:27.549Z" },
- { url = "https://files.pythonhosted.org/packages/f2/2b/05bbeb06e2dff5eab512dfc678b1cc5ee94d8ac5956a0885c64b6b26252b/numpy-2.3.5-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3095bdb8dd297e5920b010e96134ed91d852d81d490e787beca7e35ae1d89cf7", size = 14557282, upload-time = "2025-11-16T22:49:30.964Z" },
- { url = "https://files.pythonhosted.org/packages/65/fb/2b23769462b34398d9326081fad5655198fcf18966fcb1f1e49db44fbf31/numpy-2.3.5-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cba086a43d54ca804ce711b2a940b16e452807acebe7852ff327f1ecd49b0d4", size = 16897903, upload-time = "2025-11-16T22:49:34.191Z" },
- { url = "https://files.pythonhosted.org/packages/ac/14/085f4cf05fc3f1e8aa95e85404e984ffca9b2275a5dc2b1aae18a67538b8/numpy-2.3.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6cf9b429b21df6b99f4dee7a1218b8b7ffbbe7df8764dc0bd60ce8a0708fed1e", size = 16341672, upload-time = "2025-11-16T22:49:37.2Z" },
- { url = "https://files.pythonhosted.org/packages/6f/3b/1f73994904142b2aa290449b3bb99772477b5fd94d787093e4f24f5af763/numpy-2.3.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:396084a36abdb603546b119d96528c2f6263921c50df3c8fd7cb28873a237748", size = 18838896, upload-time = "2025-11-16T22:49:39.727Z" },
- { url = "https://files.pythonhosted.org/packages/cd/b9/cf6649b2124f288309ffc353070792caf42ad69047dcc60da85ee85fea58/numpy-2.3.5-cp311-cp311-win32.whl", hash = "sha256:b0c7088a73aef3d687c4deef8452a3ac7c1be4e29ed8bf3b366c8111128ac60c", size = 6563608, upload-time = "2025-11-16T22:49:42.079Z" },
- { url = "https://files.pythonhosted.org/packages/aa/44/9fe81ae1dcc29c531843852e2874080dc441338574ccc4306b39e2ff6e59/numpy-2.3.5-cp311-cp311-win_amd64.whl", hash = "sha256:a414504bef8945eae5f2d7cb7be2d4af77c5d1cb5e20b296c2c25b61dff2900c", size = 13078442, upload-time = "2025-11-16T22:49:43.99Z" },
- { url = "https://files.pythonhosted.org/packages/6d/a7/f99a41553d2da82a20a2f22e93c94f928e4490bb447c9ff3c4ff230581d3/numpy-2.3.5-cp311-cp311-win_arm64.whl", hash = "sha256:0cd00b7b36e35398fa2d16af7b907b65304ef8bb4817a550e06e5012929830fa", size = 10458555, upload-time = "2025-11-16T22:49:47.092Z" },
- { url = "https://files.pythonhosted.org/packages/44/37/e669fe6cbb2b96c62f6bbedc6a81c0f3b7362f6a59230b23caa673a85721/numpy-2.3.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:74ae7b798248fe62021dbf3c914245ad45d1a6b0cb4a29ecb4b31d0bfbc4cc3e", size = 16733873, upload-time = "2025-11-16T22:49:49.84Z" },
- { url = "https://files.pythonhosted.org/packages/c5/65/df0db6c097892c9380851ab9e44b52d4f7ba576b833996e0080181c0c439/numpy-2.3.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ee3888d9ff7c14604052b2ca5535a30216aa0a58e948cdd3eeb8d3415f638769", size = 12259838, upload-time = "2025-11-16T22:49:52.863Z" },
- { url = "https://files.pythonhosted.org/packages/5b/e1/1ee06e70eb2136797abe847d386e7c0e830b67ad1d43f364dd04fa50d338/numpy-2.3.5-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:612a95a17655e213502f60cfb9bf9408efdc9eb1d5f50535cc6eb365d11b42b5", size = 5088378, upload-time = "2025-11-16T22:49:55.055Z" },
- { url = "https://files.pythonhosted.org/packages/6d/9c/1ca85fb86708724275103b81ec4cf1ac1d08f465368acfc8da7ab545bdae/numpy-2.3.5-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3101e5177d114a593d79dd79658650fe28b5a0d8abeb8ce6f437c0e6df5be1a4", size = 6628559, upload-time = "2025-11-16T22:49:57.371Z" },
- { url = "https://files.pythonhosted.org/packages/74/78/fcd41e5a0ce4f3f7b003da85825acddae6d7ecb60cf25194741b036ca7d6/numpy-2.3.5-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b973c57ff8e184109db042c842423ff4f60446239bd585a5131cc47f06f789d", size = 14250702, upload-time = "2025-11-16T22:49:59.632Z" },
- { url = "https://files.pythonhosted.org/packages/b6/23/2a1b231b8ff672b4c450dac27164a8b2ca7d9b7144f9c02d2396518352eb/numpy-2.3.5-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0d8163f43acde9a73c2a33605353a4f1bc4798745a8b1d73183b28e5b435ae28", size = 16606086, upload-time = "2025-11-16T22:50:02.127Z" },
- { url = "https://files.pythonhosted.org/packages/a0/c5/5ad26fbfbe2012e190cc7d5003e4d874b88bb18861d0829edc140a713021/numpy-2.3.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:51c1e14eb1e154ebd80e860722f9e6ed6ec89714ad2db2d3aa33c31d7c12179b", size = 16025985, upload-time = "2025-11-16T22:50:04.536Z" },
- { url = "https://files.pythonhosted.org/packages/d2/fa/dd48e225c46c819288148d9d060b047fd2a6fb1eb37eae25112ee4cb4453/numpy-2.3.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b46b4ec24f7293f23adcd2d146960559aaf8020213de8ad1909dba6c013bf89c", size = 18542976, upload-time = "2025-11-16T22:50:07.557Z" },
- { url = "https://files.pythonhosted.org/packages/05/79/ccbd23a75862d95af03d28b5c6901a1b7da4803181513d52f3b86ed9446e/numpy-2.3.5-cp312-cp312-win32.whl", hash = "sha256:3997b5b3c9a771e157f9aae01dd579ee35ad7109be18db0e85dbdbe1de06e952", size = 6285274, upload-time = "2025-11-16T22:50:10.746Z" },
- { url = "https://files.pythonhosted.org/packages/2d/57/8aeaf160312f7f489dea47ab61e430b5cb051f59a98ae68b7133ce8fa06a/numpy-2.3.5-cp312-cp312-win_amd64.whl", hash = "sha256:86945f2ee6d10cdfd67bcb4069c1662dd711f7e2a4343db5cecec06b87cf31aa", size = 12782922, upload-time = "2025-11-16T22:50:12.811Z" },
- { url = "https://files.pythonhosted.org/packages/78/a6/aae5cc2ca78c45e64b9ef22f089141d661516856cf7c8a54ba434576900d/numpy-2.3.5-cp312-cp312-win_arm64.whl", hash = "sha256:f28620fe26bee16243be2b7b874da327312240a7cdc38b769a697578d2100013", size = 10194667, upload-time = "2025-11-16T22:50:16.16Z" },
- { url = "https://files.pythonhosted.org/packages/db/69/9cde09f36da4b5a505341180a3f2e6fadc352fd4d2b7096ce9778db83f1a/numpy-2.3.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d0f23b44f57077c1ede8c5f26b30f706498b4862d3ff0a7298b8411dd2f043ff", size = 16728251, upload-time = "2025-11-16T22:50:19.013Z" },
- { url = "https://files.pythonhosted.org/packages/79/fb/f505c95ceddd7027347b067689db71ca80bd5ecc926f913f1a23e65cf09b/numpy-2.3.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa5bc7c5d59d831d9773d1170acac7893ce3a5e130540605770ade83280e7188", size = 12254652, upload-time = "2025-11-16T22:50:21.487Z" },
- { url = "https://files.pythonhosted.org/packages/78/da/8c7738060ca9c31b30e9301ee0cf6c5ffdbf889d9593285a1cead337f9a5/numpy-2.3.5-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:ccc933afd4d20aad3c00bcef049cb40049f7f196e0397f1109dba6fed63267b0", size = 5083172, upload-time = "2025-11-16T22:50:24.562Z" },
- { url = "https://files.pythonhosted.org/packages/a4/b4/ee5bb2537fb9430fd2ef30a616c3672b991a4129bb1c7dcc42aa0abbe5d7/numpy-2.3.5-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:afaffc4393205524af9dfa400fa250143a6c3bc646c08c9f5e25a9f4b4d6a903", size = 6622990, upload-time = "2025-11-16T22:50:26.47Z" },
- { url = "https://files.pythonhosted.org/packages/95/03/dc0723a013c7d7c19de5ef29e932c3081df1c14ba582b8b86b5de9db7f0f/numpy-2.3.5-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9c75442b2209b8470d6d5d8b1c25714270686f14c749028d2199c54e29f20b4d", size = 14248902, upload-time = "2025-11-16T22:50:28.861Z" },
- { url = "https://files.pythonhosted.org/packages/f5/10/ca162f45a102738958dcec8023062dad0cbc17d1ab99d68c4e4a6c45fb2b/numpy-2.3.5-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e06aa0af8c0f05104d56450d6093ee639e15f24ecf62d417329d06e522e017", size = 16597430, upload-time = "2025-11-16T22:50:31.56Z" },
- { url = "https://files.pythonhosted.org/packages/2a/51/c1e29be863588db58175175f057286900b4b3327a1351e706d5e0f8dd679/numpy-2.3.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed89927b86296067b4f81f108a2271d8926467a8868e554eaf370fc27fa3ccaf", size = 16024551, upload-time = "2025-11-16T22:50:34.242Z" },
- { url = "https://files.pythonhosted.org/packages/83/68/8236589d4dbb87253d28259d04d9b814ec0ecce7cb1c7fed29729f4c3a78/numpy-2.3.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51c55fe3451421f3a6ef9a9c1439e82101c57a2c9eab9feb196a62b1a10b58ce", size = 18533275, upload-time = "2025-11-16T22:50:37.651Z" },
- { url = "https://files.pythonhosted.org/packages/40/56/2932d75b6f13465239e3b7b7e511be27f1b8161ca2510854f0b6e521c395/numpy-2.3.5-cp313-cp313-win32.whl", hash = "sha256:1978155dd49972084bd6ef388d66ab70f0c323ddee6f693d539376498720fb7e", size = 6277637, upload-time = "2025-11-16T22:50:40.11Z" },
- { url = "https://files.pythonhosted.org/packages/0c/88/e2eaa6cffb115b85ed7c7c87775cb8bcf0816816bc98ca8dbfa2ee33fe6e/numpy-2.3.5-cp313-cp313-win_amd64.whl", hash = "sha256:00dc4e846108a382c5869e77c6ed514394bdeb3403461d25a829711041217d5b", size = 12779090, upload-time = "2025-11-16T22:50:42.503Z" },
- { url = "https://files.pythonhosted.org/packages/8f/88/3f41e13a44ebd4034ee17baa384acac29ba6a4fcc2aca95f6f08ca0447d1/numpy-2.3.5-cp313-cp313-win_arm64.whl", hash = "sha256:0472f11f6ec23a74a906a00b48a4dcf3849209696dff7c189714511268d103ae", size = 10194710, upload-time = "2025-11-16T22:50:44.971Z" },
- { url = "https://files.pythonhosted.org/packages/13/cb/71744144e13389d577f867f745b7df2d8489463654a918eea2eeb166dfc9/numpy-2.3.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:414802f3b97f3c1eef41e530aaba3b3c1620649871d8cb38c6eaff034c2e16bd", size = 16827292, upload-time = "2025-11-16T22:50:47.715Z" },
- { url = "https://files.pythonhosted.org/packages/71/80/ba9dc6f2a4398e7f42b708a7fdc841bb638d353be255655498edbf9a15a8/numpy-2.3.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5ee6609ac3604fa7780e30a03e5e241a7956f8e2fcfe547d51e3afa5247ac47f", size = 12378897, upload-time = "2025-11-16T22:50:51.327Z" },
- { url = "https://files.pythonhosted.org/packages/2e/6d/db2151b9f64264bcceccd51741aa39b50150de9b602d98ecfe7e0c4bff39/numpy-2.3.5-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:86d835afea1eaa143012a2d7a3f45a3adce2d7adc8b4961f0b362214d800846a", size = 5207391, upload-time = "2025-11-16T22:50:54.542Z" },
- { url = "https://files.pythonhosted.org/packages/80/ae/429bacace5ccad48a14c4ae5332f6aa8ab9f69524193511d60ccdfdc65fa/numpy-2.3.5-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:30bc11310e8153ca664b14c5f1b73e94bd0503681fcf136a163de856f3a50139", size = 6721275, upload-time = "2025-11-16T22:50:56.794Z" },
- { url = "https://files.pythonhosted.org/packages/74/5b/1919abf32d8722646a38cd527bc3771eb229a32724ee6ba340ead9b92249/numpy-2.3.5-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1062fde1dcf469571705945b0f221b73928f34a20c904ffb45db101907c3454e", size = 14306855, upload-time = "2025-11-16T22:50:59.208Z" },
- { url = "https://files.pythonhosted.org/packages/a5/87/6831980559434973bebc30cd9c1f21e541a0f2b0c280d43d3afd909b66d0/numpy-2.3.5-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ce581db493ea1a96c0556360ede6607496e8bf9b3a8efa66e06477267bc831e9", size = 16657359, upload-time = "2025-11-16T22:51:01.991Z" },
- { url = "https://files.pythonhosted.org/packages/dd/91/c797f544491ee99fd00495f12ebb7802c440c1915811d72ac5b4479a3356/numpy-2.3.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:cc8920d2ec5fa99875b670bb86ddeb21e295cb07aa331810d9e486e0b969d946", size = 16093374, upload-time = "2025-11-16T22:51:05.291Z" },
- { url = "https://files.pythonhosted.org/packages/74/a6/54da03253afcbe7a72785ec4da9c69fb7a17710141ff9ac5fcb2e32dbe64/numpy-2.3.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:9ee2197ef8c4f0dfe405d835f3b6a14f5fee7782b5de51ba06fb65fc9b36e9f1", size = 18594587, upload-time = "2025-11-16T22:51:08.585Z" },
- { url = "https://files.pythonhosted.org/packages/80/e9/aff53abbdd41b0ecca94285f325aff42357c6b5abc482a3fcb4994290b18/numpy-2.3.5-cp313-cp313t-win32.whl", hash = "sha256:70b37199913c1bd300ff6e2693316c6f869c7ee16378faf10e4f5e3275b299c3", size = 6405940, upload-time = "2025-11-16T22:51:11.541Z" },
- { url = "https://files.pythonhosted.org/packages/d5/81/50613fec9d4de5480de18d4f8ef59ad7e344d497edbef3cfd80f24f98461/numpy-2.3.5-cp313-cp313t-win_amd64.whl", hash = "sha256:b501b5fa195cc9e24fe102f21ec0a44dffc231d2af79950b451e0d99cea02234", size = 12920341, upload-time = "2025-11-16T22:51:14.312Z" },
- { url = "https://files.pythonhosted.org/packages/bb/ab/08fd63b9a74303947f34f0bd7c5903b9c5532c2d287bead5bdf4c556c486/numpy-2.3.5-cp313-cp313t-win_arm64.whl", hash = "sha256:a80afd79f45f3c4a7d341f13acbe058d1ca8ac017c165d3fa0d3de6bc1a079d7", size = 10262507, upload-time = "2025-11-16T22:51:16.846Z" },
- { url = "https://files.pythonhosted.org/packages/ba/97/1a914559c19e32d6b2e233cf9a6a114e67c856d35b1d6babca571a3e880f/numpy-2.3.5-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:bf06bc2af43fa8d32d30fae16ad965663e966b1a3202ed407b84c989c3221e82", size = 16735706, upload-time = "2025-11-16T22:51:19.558Z" },
- { url = "https://files.pythonhosted.org/packages/57/d4/51233b1c1b13ecd796311216ae417796b88b0616cfd8a33ae4536330748a/numpy-2.3.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:052e8c42e0c49d2575621c158934920524f6c5da05a1d3b9bab5d8e259e045f0", size = 12264507, upload-time = "2025-11-16T22:51:22.492Z" },
- { url = "https://files.pythonhosted.org/packages/45/98/2fe46c5c2675b8306d0b4a3ec3494273e93e1226a490f766e84298576956/numpy-2.3.5-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:1ed1ec893cff7040a02c8aa1c8611b94d395590d553f6b53629a4461dc7f7b63", size = 5093049, upload-time = "2025-11-16T22:51:25.171Z" },
- { url = "https://files.pythonhosted.org/packages/ce/0e/0698378989bb0ac5f1660c81c78ab1fe5476c1a521ca9ee9d0710ce54099/numpy-2.3.5-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:2dcd0808a421a482a080f89859a18beb0b3d1e905b81e617a188bd80422d62e9", size = 6626603, upload-time = "2025-11-16T22:51:27Z" },
- { url = "https://files.pythonhosted.org/packages/5e/a6/9ca0eecc489640615642a6cbc0ca9e10df70df38c4d43f5a928ff18d8827/numpy-2.3.5-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:727fd05b57df37dc0bcf1a27767a3d9a78cbbc92822445f32cc3436ba797337b", size = 14262696, upload-time = "2025-11-16T22:51:29.402Z" },
- { url = "https://files.pythonhosted.org/packages/c8/f6/07ec185b90ec9d7217a00eeeed7383b73d7e709dae2a9a021b051542a708/numpy-2.3.5-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fffe29a1ef00883599d1dc2c51aa2e5d80afe49523c261a74933df395c15c520", size = 16597350, upload-time = "2025-11-16T22:51:32.167Z" },
- { url = "https://files.pythonhosted.org/packages/75/37/164071d1dde6a1a84c9b8e5b414fa127981bad47adf3a6b7e23917e52190/numpy-2.3.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8f7f0e05112916223d3f438f293abf0727e1181b5983f413dfa2fefc4098245c", size = 16040190, upload-time = "2025-11-16T22:51:35.403Z" },
- { url = "https://files.pythonhosted.org/packages/08/3c/f18b82a406b04859eb026d204e4e1773eb41c5be58410f41ffa511d114ae/numpy-2.3.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2e2eb32ddb9ccb817d620ac1d8dae7c3f641c1e5f55f531a33e8ab97960a75b8", size = 18536749, upload-time = "2025-11-16T22:51:39.698Z" },
- { url = "https://files.pythonhosted.org/packages/40/79/f82f572bf44cf0023a2fe8588768e23e1592585020d638999f15158609e1/numpy-2.3.5-cp314-cp314-win32.whl", hash = "sha256:66f85ce62c70b843bab1fb14a05d5737741e74e28c7b8b5a064de10142fad248", size = 6335432, upload-time = "2025-11-16T22:51:42.476Z" },
- { url = "https://files.pythonhosted.org/packages/a3/2e/235b4d96619931192c91660805e5e49242389742a7a82c27665021db690c/numpy-2.3.5-cp314-cp314-win_amd64.whl", hash = "sha256:e6a0bc88393d65807d751a614207b7129a310ca4fe76a74e5c7da5fa5671417e", size = 12919388, upload-time = "2025-11-16T22:51:45.275Z" },
- { url = "https://files.pythonhosted.org/packages/07/2b/29fd75ce45d22a39c61aad74f3d718e7ab67ccf839ca8b60866054eb15f8/numpy-2.3.5-cp314-cp314-win_arm64.whl", hash = "sha256:aeffcab3d4b43712bb7a60b65f6044d444e75e563ff6180af8f98dd4b905dfd2", size = 10476651, upload-time = "2025-11-16T22:51:47.749Z" },
- { url = "https://files.pythonhosted.org/packages/17/e1/f6a721234ebd4d87084cfa68d081bcba2f5cfe1974f7de4e0e8b9b2a2ba1/numpy-2.3.5-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:17531366a2e3a9e30762c000f2c43a9aaa05728712e25c11ce1dbe700c53ad41", size = 16834503, upload-time = "2025-11-16T22:51:50.443Z" },
- { url = "https://files.pythonhosted.org/packages/5c/1c/baf7ffdc3af9c356e1c135e57ab7cf8d247931b9554f55c467efe2c69eff/numpy-2.3.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d21644de1b609825ede2f48be98dfde4656aefc713654eeee280e37cadc4e0ad", size = 12381612, upload-time = "2025-11-16T22:51:53.609Z" },
- { url = "https://files.pythonhosted.org/packages/74/91/f7f0295151407ddc9ba34e699013c32c3c91944f9b35fcf9281163dc1468/numpy-2.3.5-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:c804e3a5aba5460c73955c955bdbd5c08c354954e9270a2c1565f62e866bdc39", size = 5210042, upload-time = "2025-11-16T22:51:56.213Z" },
- { url = "https://files.pythonhosted.org/packages/2e/3b/78aebf345104ec50dd50a4d06ddeb46a9ff5261c33bcc58b1c4f12f85ec2/numpy-2.3.5-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:cc0a57f895b96ec78969c34f682c602bf8da1a0270b09bc65673df2e7638ec20", size = 6724502, upload-time = "2025-11-16T22:51:58.584Z" },
- { url = "https://files.pythonhosted.org/packages/02/c6/7c34b528740512e57ef1b7c8337ab0b4f0bddf34c723b8996c675bc2bc91/numpy-2.3.5-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:900218e456384ea676e24ea6a0417f030a3b07306d29d7ad843957b40a9d8d52", size = 14308962, upload-time = "2025-11-16T22:52:01.698Z" },
- { url = "https://files.pythonhosted.org/packages/80/35/09d433c5262bc32d725bafc619e095b6a6651caf94027a03da624146f655/numpy-2.3.5-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:09a1bea522b25109bf8e6f3027bd810f7c1085c64a0c7ce050c1676ad0ba010b", size = 16655054, upload-time = "2025-11-16T22:52:04.267Z" },
- { url = "https://files.pythonhosted.org/packages/7a/ab/6a7b259703c09a88804fa2430b43d6457b692378f6b74b356155283566ac/numpy-2.3.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:04822c00b5fd0323c8166d66c701dc31b7fbd252c100acd708c48f763968d6a3", size = 16091613, upload-time = "2025-11-16T22:52:08.651Z" },
- { url = "https://files.pythonhosted.org/packages/c2/88/330da2071e8771e60d1038166ff9d73f29da37b01ec3eb43cb1427464e10/numpy-2.3.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d6889ec4ec662a1a37eb4b4fb26b6100841804dac55bd9df579e326cdc146227", size = 18591147, upload-time = "2025-11-16T22:52:11.453Z" },
- { url = "https://files.pythonhosted.org/packages/51/41/851c4b4082402d9ea860c3626db5d5df47164a712cb23b54be028b184c1c/numpy-2.3.5-cp314-cp314t-win32.whl", hash = "sha256:93eebbcf1aafdf7e2ddd44c2923e2672e1010bddc014138b229e49725b4d6be5", size = 6479806, upload-time = "2025-11-16T22:52:14.641Z" },
- { url = "https://files.pythonhosted.org/packages/90/30/d48bde1dfd93332fa557cff1972fbc039e055a52021fbef4c2c4b1eefd17/numpy-2.3.5-cp314-cp314t-win_amd64.whl", hash = "sha256:c8a9958e88b65c3b27e22ca2a076311636850b612d6bbfb76e8d156aacde2aaf", size = 13105760, upload-time = "2025-11-16T22:52:17.975Z" },
- { url = "https://files.pythonhosted.org/packages/2d/fd/4b5eb0b3e888d86aee4d198c23acec7d214baaf17ea93c1adec94c9518b9/numpy-2.3.5-cp314-cp314t-win_arm64.whl", hash = "sha256:6203fdf9f3dc5bdaed7319ad8698e685c7a3be10819f41d32a0723e611733b42", size = 10545459, upload-time = "2025-11-16T22:52:20.55Z" },
- { url = "https://files.pythonhosted.org/packages/c6/65/f9dea8e109371ade9c782b4e4756a82edf9d3366bca495d84d79859a0b79/numpy-2.3.5-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:f0963b55cdd70fad460fa4c1341f12f976bb26cb66021a5580329bd498988310", size = 16910689, upload-time = "2025-11-16T22:52:23.247Z" },
- { url = "https://files.pythonhosted.org/packages/00/4f/edb00032a8fb92ec0a679d3830368355da91a69cab6f3e9c21b64d0bb986/numpy-2.3.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:f4255143f5160d0de972d28c8f9665d882b5f61309d8362fdd3e103cf7bf010c", size = 12457053, upload-time = "2025-11-16T22:52:26.367Z" },
- { url = "https://files.pythonhosted.org/packages/16/a4/e8a53b5abd500a63836a29ebe145fc1ab1f2eefe1cfe59276020373ae0aa/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:a4b9159734b326535f4dd01d947f919c6eefd2d9827466a696c44ced82dfbc18", size = 5285635, upload-time = "2025-11-16T22:52:29.266Z" },
- { url = "https://files.pythonhosted.org/packages/a3/2f/37eeb9014d9c8b3e9c55bc599c68263ca44fdbc12a93e45a21d1d56df737/numpy-2.3.5-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2feae0d2c91d46e59fcd62784a3a83b3fb677fead592ce51b5a6fbb4f95965ff", size = 6801770, upload-time = "2025-11-16T22:52:31.421Z" },
- { url = "https://files.pythonhosted.org/packages/7d/e4/68d2f474df2cb671b2b6c2986a02e520671295647dad82484cde80ca427b/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ffac52f28a7849ad7576293c0cb7b9f08304e8f7d738a8cb8a90ec4c55a998eb", size = 14391768, upload-time = "2025-11-16T22:52:33.593Z" },
- { url = "https://files.pythonhosted.org/packages/b8/50/94ccd8a2b141cb50651fddd4f6a48874acb3c91c8f0842b08a6afc4b0b21/numpy-2.3.5-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63c0e9e7eea69588479ebf4a8a270d5ac22763cc5854e9a7eae952a3908103f7", size = 16729263, upload-time = "2025-11-16T22:52:36.369Z" },
- { url = "https://files.pythonhosted.org/packages/2d/ee/346fa473e666fe14c52fcdd19ec2424157290a032d4c41f98127bfb31ac7/numpy-2.3.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f16417ec91f12f814b10bafe79ef77e70113a2f5f7018640e7425ff979253425", size = 12967213, upload-time = "2025-11-16T22:52:39.38Z" },
+version = "2.4.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/26/7e/7bae7cbcc2f8132271967aa03e03954fc1e48aa1f3bf32b29ca95fbef352/numpy-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:316b2f2584682318539f0bcaca5a496ce9ca78c88066579ebd11fd06f8e4741e", size = 16940166, upload-time = "2025-12-20T16:15:43.434Z" },
+ { url = "https://files.pythonhosted.org/packages/0f/27/6c13f5b46776d6246ec884ac5817452672156a506d08a1f2abb39961930a/numpy-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2718c1de8504121714234b6f8241d0019450353276c88b9453c9c3d92e101db", size = 12641781, upload-time = "2025-12-20T16:15:45.701Z" },
+ { url = "https://files.pythonhosted.org/packages/14/1c/83b4998d4860d15283241d9e5215f28b40ac31f497c04b12fa7f428ff370/numpy-2.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:21555da4ec4a0c942520ead42c3b0dc9477441e085c42b0fbdd6a084869a6f6b", size = 5470247, upload-time = "2025-12-20T16:15:47.943Z" },
+ { url = "https://files.pythonhosted.org/packages/54/08/cbce72c835d937795571b0464b52069f869c9e78b0c076d416c5269d2718/numpy-2.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:413aa561266a4be2d06cd2b9665e89d9f54c543f418773076a76adcf2af08bc7", size = 6799807, upload-time = "2025-12-20T16:15:49.795Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/be/2e647961cd8c980591d75cdcd9e8f647d69fbe05e2a25613dc0a2ea5fb1a/numpy-2.4.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0feafc9e03128074689183031181fac0897ff169692d8492066e949041096548", size = 14701992, upload-time = "2025-12-20T16:15:51.615Z" },
+ { url = "https://files.pythonhosted.org/packages/a2/fb/e1652fb8b6fd91ce6ed429143fe2e01ce714711e03e5b762615e7b36172c/numpy-2.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8fdfed3deaf1928fb7667d96e0567cdf58c2b370ea2ee7e586aa383ec2cb346", size = 16646871, upload-time = "2025-12-20T16:15:54.129Z" },
+ { url = "https://files.pythonhosted.org/packages/62/23/d841207e63c4322842f7cd042ae981cffe715c73376dcad8235fb31debf1/numpy-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e06a922a469cae9a57100864caf4f8a97a1026513793969f8ba5b63137a35d25", size = 16487190, upload-time = "2025-12-20T16:15:56.147Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/a0/6a842c8421ebfdec0a230e65f61e0dabda6edbef443d999d79b87c273965/numpy-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:927ccf5cd17c48f801f4ed43a7e5673a2724bd2171460be3e3894e6e332ef83a", size = 18580762, upload-time = "2025-12-20T16:15:58.524Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/d1/c79e0046641186f2134dde05e6181825b911f8bdcef31b19ddd16e232847/numpy-2.4.0-cp311-cp311-win32.whl", hash = "sha256:882567b7ae57c1b1a0250208cc21a7976d8cbcc49d5a322e607e6f09c9e0bd53", size = 6233359, upload-time = "2025-12-20T16:16:00.938Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/f0/74965001d231f28184d6305b8cdc1b6fcd4bf23033f6cb039cfe76c9fca7/numpy-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b986403023c8f3bf8f487c2e6186afda156174d31c175f747d8934dfddf3479", size = 12601132, upload-time = "2025-12-20T16:16:02.484Z" },
+ { url = "https://files.pythonhosted.org/packages/65/32/55408d0f46dfebce38017f5bd931affa7256ad6beac1a92a012e1fbc67a7/numpy-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:3f3096405acc48887458bbf9f6814d43785ac7ba2a57ea6442b581dedbc60ce6", size = 10573977, upload-time = "2025-12-20T16:16:04.77Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" },
+ { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" },
+ { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" },
+ { url = "https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" },
+ { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" },
+ { url = "https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = "2025-12-20T16:16:31.421Z" },
+ { url = "https://files.pythonhosted.org/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" },
+ { url = "https://files.pythonhosted.org/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" },
+ { url = "https://files.pythonhosted.org/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" },
+ { url = "https://files.pythonhosted.org/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" },
+ { url = "https://files.pythonhosted.org/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" },
+ { url = "https://files.pythonhosted.org/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" },
+ { url = "https://files.pythonhosted.org/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" },
+ { url = "https://files.pythonhosted.org/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" },
+ { url = "https://files.pythonhosted.org/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" },
+ { url = "https://files.pythonhosted.org/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" },
+ { url = "https://files.pythonhosted.org/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" },
+ { url = "https://files.pythonhosted.org/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" },
+ { url = "https://files.pythonhosted.org/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" },
+ { url = "https://files.pythonhosted.org/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/ed/52eac27de39d5e5a6c9aadabe672bc06f55e24a3d9010cd1183948055d76/numpy-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c95eb6db2884917d86cde0b4d4cf31adf485c8ec36bf8696dd66fa70de96f36b", size = 16647476, upload-time = "2025-12-20T16:17:17.671Z" },
+ { url = "https://files.pythonhosted.org/packages/77/c0/990ce1b7fcd4e09aeaa574e2a0a839589e4b08b2ca68070f1acb1fea6736/numpy-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:65167da969cd1ec3a1df31cb221ca3a19a8aaa25370ecb17d428415e93c1935e", size = 12374563, upload-time = "2025-12-20T16:17:20.216Z" },
+ { url = "https://files.pythonhosted.org/packages/37/7c/8c5e389c6ae8f5fd2277a988600d79e9625db3fff011a2d87ac80b881a4c/numpy-2.4.0-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:3de19cfecd1465d0dcf8a5b5ea8b3155b42ed0b639dba4b71e323d74f2a3be5e", size = 5203107, upload-time = "2025-12-20T16:17:22.47Z" },
+ { url = "https://files.pythonhosted.org/packages/e6/94/ca5b3bd6a8a70a5eec9a0b8dd7f980c1eff4b8a54970a9a7fef248ef564f/numpy-2.4.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6c05483c3136ac4c91b4e81903cb53a8707d316f488124d0398499a4f8e8ef51", size = 6538067, upload-time = "2025-12-20T16:17:24.001Z" },
+ { url = "https://files.pythonhosted.org/packages/79/43/993eb7bb5be6761dde2b3a3a594d689cec83398e3f58f4758010f3b85727/numpy-2.4.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36667db4d6c1cea79c8930ab72fadfb4060feb4bfe724141cd4bd064d2e5f8ce", size = 14411926, upload-time = "2025-12-20T16:17:25.822Z" },
+ { url = "https://files.pythonhosted.org/packages/03/75/d4c43b61de473912496317a854dac54f1efec3eeb158438da6884b70bb90/numpy-2.4.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9a818668b674047fd88c4cddada7ab8f1c298812783e8328e956b78dc4807f9f", size = 16354295, upload-time = "2025-12-20T16:17:28.308Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/0a/b54615b47ee8736a6461a4bb6749128dd3435c5a759d5663f11f0e9af4ac/numpy-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1ee32359fb7543b7b7bd0b2f46294db27e29e7bbdf70541e81b190836cd83ded", size = 16190242, upload-time = "2025-12-20T16:17:30.993Z" },
+ { url = "https://files.pythonhosted.org/packages/98/ce/ea207769aacad6246525ec6c6bbd66a2bf56c72443dc10e2f90feed29290/numpy-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e493962256a38f58283de033d8af176c5c91c084ea30f15834f7545451c42059", size = 18280875, upload-time = "2025-12-20T16:17:33.327Z" },
+ { url = "https://files.pythonhosted.org/packages/17/ef/ec409437aa962ea372ed601c519a2b141701683ff028f894b7466f0ab42b/numpy-2.4.0-cp314-cp314-win32.whl", hash = "sha256:6bbaebf0d11567fa8926215ae731e1d58e6ec28a8a25235b8a47405d301332db", size = 6002530, upload-time = "2025-12-20T16:17:35.729Z" },
+ { url = "https://files.pythonhosted.org/packages/5f/4a/5cb94c787a3ed1ac65e1271b968686521169a7b3ec0b6544bb3ca32960b0/numpy-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d857f55e7fdf7c38ab96c4558c95b97d1c685be6b05c249f5fdafcbd6f9899e", size = 12435890, upload-time = "2025-12-20T16:17:37.599Z" },
+ { url = "https://files.pythonhosted.org/packages/48/a0/04b89db963af9de1104975e2544f30de89adbf75b9e75f7dd2599be12c79/numpy-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:bb50ce5fb202a26fd5404620e7ef820ad1ab3558b444cb0b55beb7ef66cd2d63", size = 10591892, upload-time = "2025-12-20T16:17:39.649Z" },
+ { url = "https://files.pythonhosted.org/packages/53/e5/d74b5ccf6712c06c7a545025a6a71bfa03bdc7e0568b405b0d655232fd92/numpy-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:355354388cba60f2132df297e2d53053d4063f79077b67b481d21276d61fc4df", size = 12494312, upload-time = "2025-12-20T16:17:41.714Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/08/3ca9cc2ddf54dfee7ae9a6479c071092a228c68aef08252aa08dac2af002/numpy-2.4.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:1d8f9fde5f6dc1b6fc34df8162f3b3079365468703fee7f31d4e0cc8c63baed9", size = 5322862, upload-time = "2025-12-20T16:17:44.145Z" },
+ { url = "https://files.pythonhosted.org/packages/87/74/0bb63a68394c0c1e52670cfff2e309afa41edbe11b3327d9af29e4383f34/numpy-2.4.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e0434aa22c821f44eeb4c650b81c7fbdd8c0122c6c4b5a576a76d5a35625ecd9", size = 6644986, upload-time = "2025-12-20T16:17:46.203Z" },
+ { url = "https://files.pythonhosted.org/packages/06/8f/9264d9bdbcf8236af2823623fe2f3981d740fc3461e2787e231d97c38c28/numpy-2.4.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40483b2f2d3ba7aad426443767ff5632ec3156ef09742b96913787d13c336471", size = 14457958, upload-time = "2025-12-20T16:17:48.017Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/d9/f9a69ae564bbc7236a35aa883319364ef5fd41f72aa320cc1cbe66148fe2/numpy-2.4.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6a7664ddd9746e20b7325351fe1a8408d0a2bf9c63b5e898290ddc8f09544", size = 16398394, upload-time = "2025-12-20T16:17:50.409Z" },
+ { url = "https://files.pythonhosted.org/packages/34/c7/39241501408dde7f885d241a98caba5421061a2c6d2b2197ac5e3aa842d8/numpy-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ecb0019d44f4cdb50b676c5d0cb4b1eae8e15d1ed3d3e6639f986fc92b2ec52c", size = 16241044, upload-time = "2025-12-20T16:17:52.661Z" },
+ { url = "https://files.pythonhosted.org/packages/7c/95/cae7effd90e065a95e59fe710eeee05d7328ed169776dfdd9f789e032125/numpy-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d0ffd9e2e4441c96a9c91ec1783285d80bf835b677853fc2770a89d50c1e48ac", size = 18321772, upload-time = "2025-12-20T16:17:54.947Z" },
+ { url = "https://files.pythonhosted.org/packages/96/df/3c6c279accd2bfb968a76298e5b276310bd55d243df4fa8ac5816d79347d/numpy-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:77f0d13fa87036d7553bf81f0e1fe3ce68d14c9976c9851744e4d3e91127e95f", size = 6148320, upload-time = "2025-12-20T16:17:57.249Z" },
+ { url = "https://files.pythonhosted.org/packages/92/8d/f23033cce252e7a75cae853d17f582e86534c46404dea1c8ee094a9d6d84/numpy-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b1f5b45829ac1848893f0ddf5cb326110604d6df96cdc255b0bf9edd154104d4", size = 12623460, upload-time = "2025-12-20T16:17:58.963Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/4f/1f8475907d1a7c4ef9020edf7f39ea2422ec896849245f00688e4b268a71/numpy-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:23a3e9d1a6f360267e8fbb38ba5db355a6a7e9be71d7fce7ab3125e88bb646c8", size = 10661799, upload-time = "2025-12-20T16:18:01.078Z" },
+ { url = "https://files.pythonhosted.org/packages/4b/ef/088e7c7342f300aaf3ee5f2c821c4b9996a1bef2aaf6a49cc8ab4883758e/numpy-2.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b54c83f1c0c0f1d748dca0af516062b8829d53d1f0c402be24b4257a9c48ada6", size = 16819003, upload-time = "2025-12-20T16:18:03.41Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/ce/a53017b5443b4b84517182d463fc7bcc2adb4faa8b20813f8e5f5aeb5faa/numpy-2.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:aabb081ca0ec5d39591fc33018cd4b3f96e1a2dd6756282029986d00a785fba4", size = 12567105, upload-time = "2025-12-20T16:18:05.594Z" },
+ { url = "https://files.pythonhosted.org/packages/77/58/5ff91b161f2ec650c88a626c3905d938c89aaadabd0431e6d9c1330c83e2/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:8eafe7c36c8430b7794edeab3087dec7bf31d634d92f2af9949434b9d1964cba", size = 5395590, upload-time = "2025-12-20T16:18:08.031Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/4e/f1a084106df8c2df8132fc437e56987308e0524836aa7733721c8429d4fe/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2f585f52b2baf07ff3356158d9268ea095e221371f1074fadea2f42544d58b4d", size = 6709947, upload-time = "2025-12-20T16:18:09.836Z" },
+ { url = "https://files.pythonhosted.org/packages/63/09/3d8aeb809c0332c3f642da812ac2e3d74fc9252b3021f8c30c82e99e3f3d/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32ed06d0fe9cae27d8fb5f400c63ccee72370599c75e683a6358dd3a4fb50aaf", size = 14535119, upload-time = "2025-12-20T16:18:12.105Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/7f/68f0fc43a2cbdc6bb239160c754d87c922f60fbaa0fa3cd3d312b8a7f5ee/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57c540ed8fb1f05cb997c6761cd56db72395b0d6985e90571ff660452ade4f98", size = 16475815, upload-time = "2025-12-20T16:18:14.433Z" },
+ { url = "https://files.pythonhosted.org/packages/11/73/edeacba3167b1ca66d51b1a5a14697c2c40098b5ffa01811c67b1785a5ab/numpy-2.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a39fb973a726e63223287adc6dafe444ce75af952d711e400f3bf2b36ef55a7b", size = 12489376, upload-time = "2025-12-20T16:18:16.524Z" },
]
[[package]]
@@ -695,89 +709,89 @@ wheels = [
[[package]]
name = "pillow"
-version = "12.0.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/cace85a1b0c9775a9f8f5d5423c8261c858760e2466c79b2dd184638b056/pillow-12.0.0.tar.gz", hash = "sha256:87d4f8125c9988bfbed67af47dd7a953e2fc7b0cc1e7800ec6d2080d490bb353", size = 47008828, upload-time = "2025-10-15T18:24:14.008Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/0e/5a/a2f6773b64edb921a756eb0729068acad9fc5208a53f4a349396e9436721/pillow-12.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0fd00cac9c03256c8b2ff58f162ebcd2587ad3e1f2e397eab718c47e24d231cc", size = 5289798, upload-time = "2025-10-15T18:21:47.763Z" },
- { url = "https://files.pythonhosted.org/packages/2e/05/069b1f8a2e4b5a37493da6c5868531c3f77b85e716ad7a590ef87d58730d/pillow-12.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a3475b96f5908b3b16c47533daaa87380c491357d197564e0ba34ae75c0f3257", size = 4650589, upload-time = "2025-10-15T18:21:49.515Z" },
- { url = "https://files.pythonhosted.org/packages/61/e3/2c820d6e9a36432503ead175ae294f96861b07600a7156154a086ba7111a/pillow-12.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:110486b79f2d112cf6add83b28b627e369219388f64ef2f960fef9ebaf54c642", size = 6230472, upload-time = "2025-10-15T18:21:51.052Z" },
- { url = "https://files.pythonhosted.org/packages/4f/89/63427f51c64209c5e23d4d52071c8d0f21024d3a8a487737caaf614a5795/pillow-12.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5269cc1caeedb67e6f7269a42014f381f45e2e7cd42d834ede3c703a1d915fe3", size = 8033887, upload-time = "2025-10-15T18:21:52.604Z" },
- { url = "https://files.pythonhosted.org/packages/f6/1b/c9711318d4901093c15840f268ad649459cd81984c9ec9887756cca049a5/pillow-12.0.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa5129de4e174daccbc59d0a3b6d20eaf24417d59851c07ebb37aeb02947987c", size = 6343964, upload-time = "2025-10-15T18:21:54.619Z" },
- { url = "https://files.pythonhosted.org/packages/41/1e/db9470f2d030b4995083044cd8738cdd1bf773106819f6d8ba12597d5352/pillow-12.0.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bee2a6db3a7242ea309aa7ee8e2780726fed67ff4e5b40169f2c940e7eb09227", size = 7034756, upload-time = "2025-10-15T18:21:56.151Z" },
- { url = "https://files.pythonhosted.org/packages/cc/b0/6177a8bdd5ee4ed87cba2de5a3cc1db55ffbbec6176784ce5bb75aa96798/pillow-12.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:90387104ee8400a7b4598253b4c406f8958f59fcf983a6cea2b50d59f7d63d0b", size = 6458075, upload-time = "2025-10-15T18:21:57.759Z" },
- { url = "https://files.pythonhosted.org/packages/bc/5e/61537aa6fa977922c6a03253a0e727e6e4a72381a80d63ad8eec350684f2/pillow-12.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bc91a56697869546d1b8f0a3ff35224557ae7f881050e99f615e0119bf934b4e", size = 7125955, upload-time = "2025-10-15T18:21:59.372Z" },
- { url = "https://files.pythonhosted.org/packages/1f/3d/d5033539344ee3cbd9a4d69e12e63ca3a44a739eb2d4c8da350a3d38edd7/pillow-12.0.0-cp311-cp311-win32.whl", hash = "sha256:27f95b12453d165099c84f8a8bfdfd46b9e4bda9e0e4b65f0635430027f55739", size = 6298440, upload-time = "2025-10-15T18:22:00.982Z" },
- { url = "https://files.pythonhosted.org/packages/4d/42/aaca386de5cc8bd8a0254516957c1f265e3521c91515b16e286c662854c4/pillow-12.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:b583dc9070312190192631373c6c8ed277254aa6e6084b74bdd0a6d3b221608e", size = 6999256, upload-time = "2025-10-15T18:22:02.617Z" },
- { url = "https://files.pythonhosted.org/packages/ba/f1/9197c9c2d5708b785f631a6dfbfa8eb3fb9672837cb92ae9af812c13b4ed/pillow-12.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:759de84a33be3b178a64c8ba28ad5c135900359e85fb662bc6e403ad4407791d", size = 2436025, upload-time = "2025-10-15T18:22:04.598Z" },
- { url = "https://files.pythonhosted.org/packages/2c/90/4fcce2c22caf044e660a198d740e7fbc14395619e3cb1abad12192c0826c/pillow-12.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:53561a4ddc36facb432fae7a9d8afbfaf94795414f5cdc5fc52f28c1dca90371", size = 5249377, upload-time = "2025-10-15T18:22:05.993Z" },
- { url = "https://files.pythonhosted.org/packages/fd/e0/ed960067543d080691d47d6938ebccbf3976a931c9567ab2fbfab983a5dd/pillow-12.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71db6b4c1653045dacc1585c1b0d184004f0d7e694c7b34ac165ca70c0838082", size = 4650343, upload-time = "2025-10-15T18:22:07.718Z" },
- { url = "https://files.pythonhosted.org/packages/e7/a1/f81fdeddcb99c044bf7d6faa47e12850f13cee0849537a7d27eeab5534d4/pillow-12.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2fa5f0b6716fc88f11380b88b31fe591a06c6315e955c096c35715788b339e3f", size = 6232981, upload-time = "2025-10-15T18:22:09.287Z" },
- { url = "https://files.pythonhosted.org/packages/88/e1/9098d3ce341a8750b55b0e00c03f1630d6178f38ac191c81c97a3b047b44/pillow-12.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:82240051c6ca513c616f7f9da06e871f61bfd7805f566275841af15015b8f98d", size = 8041399, upload-time = "2025-10-15T18:22:10.872Z" },
- { url = "https://files.pythonhosted.org/packages/a7/62/a22e8d3b602ae8cc01446d0c57a54e982737f44b6f2e1e019a925143771d/pillow-12.0.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55f818bd74fe2f11d4d7cbc65880a843c4075e0ac7226bc1a23261dbea531953", size = 6347740, upload-time = "2025-10-15T18:22:12.769Z" },
- { url = "https://files.pythonhosted.org/packages/4f/87/424511bdcd02c8d7acf9f65caa09f291a519b16bd83c3fb3374b3d4ae951/pillow-12.0.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b87843e225e74576437fd5b6a4c2205d422754f84a06942cfaf1dc32243e45a8", size = 7040201, upload-time = "2025-10-15T18:22:14.813Z" },
- { url = "https://files.pythonhosted.org/packages/dc/4d/435c8ac688c54d11755aedfdd9f29c9eeddf68d150fe42d1d3dbd2365149/pillow-12.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c607c90ba67533e1b2355b821fef6764d1dd2cbe26b8c1005ae84f7aea25ff79", size = 6462334, upload-time = "2025-10-15T18:22:16.375Z" },
- { url = "https://files.pythonhosted.org/packages/2b/f2/ad34167a8059a59b8ad10bc5c72d4d9b35acc6b7c0877af8ac885b5f2044/pillow-12.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:21f241bdd5080a15bc86d3466a9f6074a9c2c2b314100dd896ac81ee6db2f1ba", size = 7134162, upload-time = "2025-10-15T18:22:17.996Z" },
- { url = "https://files.pythonhosted.org/packages/0c/b1/a7391df6adacf0a5c2cf6ac1cf1fcc1369e7d439d28f637a847f8803beb3/pillow-12.0.0-cp312-cp312-win32.whl", hash = "sha256:dd333073e0cacdc3089525c7df7d39b211bcdf31fc2824e49d01c6b6187b07d0", size = 6298769, upload-time = "2025-10-15T18:22:19.923Z" },
- { url = "https://files.pythonhosted.org/packages/a2/0b/d87733741526541c909bbf159e338dcace4f982daac6e5a8d6be225ca32d/pillow-12.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9fe611163f6303d1619bbcb653540a4d60f9e55e622d60a3108be0d5b441017a", size = 7001107, upload-time = "2025-10-15T18:22:21.644Z" },
- { url = "https://files.pythonhosted.org/packages/bc/96/aaa61ce33cc98421fb6088af2a03be4157b1e7e0e87087c888e2370a7f45/pillow-12.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:7dfb439562f234f7d57b1ac6bc8fe7f838a4bd49c79230e0f6a1da93e82f1fad", size = 2436012, upload-time = "2025-10-15T18:22:23.621Z" },
- { url = "https://files.pythonhosted.org/packages/62/f2/de993bb2d21b33a98d031ecf6a978e4b61da207bef02f7b43093774c480d/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:0869154a2d0546545cde61d1789a6524319fc1897d9ee31218eae7a60ccc5643", size = 4045493, upload-time = "2025-10-15T18:22:25.758Z" },
- { url = "https://files.pythonhosted.org/packages/0e/b6/bc8d0c4c9f6f111a783d045310945deb769b806d7574764234ffd50bc5ea/pillow-12.0.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:a7921c5a6d31b3d756ec980f2f47c0cfdbce0fc48c22a39347a895f41f4a6ea4", size = 4120461, upload-time = "2025-10-15T18:22:27.286Z" },
- { url = "https://files.pythonhosted.org/packages/5d/57/d60d343709366a353dc56adb4ee1e7d8a2cc34e3fbc22905f4167cfec119/pillow-12.0.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:1ee80a59f6ce048ae13cda1abf7fbd2a34ab9ee7d401c46be3ca685d1999a399", size = 3576912, upload-time = "2025-10-15T18:22:28.751Z" },
- { url = "https://files.pythonhosted.org/packages/a4/a4/a0a31467e3f83b94d37568294b01d22b43ae3c5d85f2811769b9c66389dd/pillow-12.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c50f36a62a22d350c96e49ad02d0da41dbd17ddc2e29750dbdba4323f85eb4a5", size = 5249132, upload-time = "2025-10-15T18:22:30.641Z" },
- { url = "https://files.pythonhosted.org/packages/83/06/48eab21dd561de2914242711434c0c0eb992ed08ff3f6107a5f44527f5e9/pillow-12.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5193fde9a5f23c331ea26d0cf171fbf67e3f247585f50c08b3e205c7aeb4589b", size = 4650099, upload-time = "2025-10-15T18:22:32.73Z" },
- { url = "https://files.pythonhosted.org/packages/fc/bd/69ed99fd46a8dba7c1887156d3572fe4484e3f031405fcc5a92e31c04035/pillow-12.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bde737cff1a975b70652b62d626f7785e0480918dece11e8fef3c0cf057351c3", size = 6230808, upload-time = "2025-10-15T18:22:34.337Z" },
- { url = "https://files.pythonhosted.org/packages/ea/94/8fad659bcdbf86ed70099cb60ae40be6acca434bbc8c4c0d4ef356d7e0de/pillow-12.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a6597ff2b61d121172f5844b53f21467f7082f5fb385a9a29c01414463f93b07", size = 8037804, upload-time = "2025-10-15T18:22:36.402Z" },
- { url = "https://files.pythonhosted.org/packages/20/39/c685d05c06deecfd4e2d1950e9a908aa2ca8bc4e6c3b12d93b9cafbd7837/pillow-12.0.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b817e7035ea7f6b942c13aa03bb554fc44fea70838ea21f8eb31c638326584e", size = 6345553, upload-time = "2025-10-15T18:22:38.066Z" },
- { url = "https://files.pythonhosted.org/packages/38/57/755dbd06530a27a5ed74f8cb0a7a44a21722ebf318edbe67ddbd7fb28f88/pillow-12.0.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f4f1231b7dec408e8670264ce63e9c71409d9583dd21d32c163e25213ee2a344", size = 7037729, upload-time = "2025-10-15T18:22:39.769Z" },
- { url = "https://files.pythonhosted.org/packages/ca/b6/7e94f4c41d238615674d06ed677c14883103dce1c52e4af16f000338cfd7/pillow-12.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e51b71417049ad6ab14c49608b4a24d8fb3fe605e5dfabfe523b58064dc3d27", size = 6459789, upload-time = "2025-10-15T18:22:41.437Z" },
- { url = "https://files.pythonhosted.org/packages/9c/14/4448bb0b5e0f22dd865290536d20ec8a23b64e2d04280b89139f09a36bb6/pillow-12.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:d120c38a42c234dc9a8c5de7ceaaf899cf33561956acb4941653f8bdc657aa79", size = 7130917, upload-time = "2025-10-15T18:22:43.152Z" },
- { url = "https://files.pythonhosted.org/packages/dd/ca/16c6926cc1c015845745d5c16c9358e24282f1e588237a4c36d2b30f182f/pillow-12.0.0-cp313-cp313-win32.whl", hash = "sha256:4cc6b3b2efff105c6a1656cfe59da4fdde2cda9af1c5e0b58529b24525d0a098", size = 6302391, upload-time = "2025-10-15T18:22:44.753Z" },
- { url = "https://files.pythonhosted.org/packages/6d/2a/dd43dcfd6dae9b6a49ee28a8eedb98c7d5ff2de94a5d834565164667b97b/pillow-12.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:4cf7fed4b4580601c4345ceb5d4cbf5a980d030fd5ad07c4d2ec589f95f09905", size = 7007477, upload-time = "2025-10-15T18:22:46.838Z" },
- { url = "https://files.pythonhosted.org/packages/77/f0/72ea067f4b5ae5ead653053212af05ce3705807906ba3f3e8f58ddf617e6/pillow-12.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:9f0b04c6b8584c2c193babcccc908b38ed29524b29dd464bc8801bf10d746a3a", size = 2435918, upload-time = "2025-10-15T18:22:48.399Z" },
- { url = "https://files.pythonhosted.org/packages/f5/5e/9046b423735c21f0487ea6cb5b10f89ea8f8dfbe32576fe052b5ba9d4e5b/pillow-12.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7fa22993bac7b77b78cae22bad1e2a987ddf0d9015c63358032f84a53f23cdc3", size = 5251406, upload-time = "2025-10-15T18:22:49.905Z" },
- { url = "https://files.pythonhosted.org/packages/12/66/982ceebcdb13c97270ef7a56c3969635b4ee7cd45227fa707c94719229c5/pillow-12.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:f135c702ac42262573fe9714dfe99c944b4ba307af5eb507abef1667e2cbbced", size = 4653218, upload-time = "2025-10-15T18:22:51.587Z" },
- { url = "https://files.pythonhosted.org/packages/16/b3/81e625524688c31859450119bf12674619429cab3119eec0e30a7a1029cb/pillow-12.0.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c85de1136429c524e55cfa4e033b4a7940ac5c8ee4d9401cc2d1bf48154bbc7b", size = 6266564, upload-time = "2025-10-15T18:22:53.215Z" },
- { url = "https://files.pythonhosted.org/packages/98/59/dfb38f2a41240d2408096e1a76c671d0a105a4a8471b1871c6902719450c/pillow-12.0.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:38df9b4bfd3db902c9c2bd369bcacaf9d935b2fff73709429d95cc41554f7b3d", size = 8069260, upload-time = "2025-10-15T18:22:54.933Z" },
- { url = "https://files.pythonhosted.org/packages/dc/3d/378dbea5cd1874b94c312425ca77b0f47776c78e0df2df751b820c8c1d6c/pillow-12.0.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7d87ef5795da03d742bf49439f9ca4d027cde49c82c5371ba52464aee266699a", size = 6379248, upload-time = "2025-10-15T18:22:56.605Z" },
- { url = "https://files.pythonhosted.org/packages/84/b0/d525ef47d71590f1621510327acec75ae58c721dc071b17d8d652ca494d8/pillow-12.0.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:aff9e4d82d082ff9513bdd6acd4f5bd359f5b2c870907d2b0a9c5e10d40c88fe", size = 7066043, upload-time = "2025-10-15T18:22:58.53Z" },
- { url = "https://files.pythonhosted.org/packages/61/2c/aced60e9cf9d0cde341d54bf7932c9ffc33ddb4a1595798b3a5150c7ec4e/pillow-12.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8d8ca2b210ada074d57fcee40c30446c9562e542fc46aedc19baf758a93532ee", size = 6490915, upload-time = "2025-10-15T18:23:00.582Z" },
- { url = "https://files.pythonhosted.org/packages/ef/26/69dcb9b91f4e59f8f34b2332a4a0a951b44f547c4ed39d3e4dcfcff48f89/pillow-12.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:99a7f72fb6249302aa62245680754862a44179b545ded638cf1fef59befb57ef", size = 7157998, upload-time = "2025-10-15T18:23:02.627Z" },
- { url = "https://files.pythonhosted.org/packages/61/2b/726235842220ca95fa441ddf55dd2382b52ab5b8d9c0596fe6b3f23dafe8/pillow-12.0.0-cp313-cp313t-win32.whl", hash = "sha256:4078242472387600b2ce8d93ade8899c12bf33fa89e55ec89fe126e9d6d5d9e9", size = 6306201, upload-time = "2025-10-15T18:23:04.709Z" },
- { url = "https://files.pythonhosted.org/packages/c0/3d/2afaf4e840b2df71344ababf2f8edd75a705ce500e5dc1e7227808312ae1/pillow-12.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:2c54c1a783d6d60595d3514f0efe9b37c8808746a66920315bfd34a938d7994b", size = 7013165, upload-time = "2025-10-15T18:23:06.46Z" },
- { url = "https://files.pythonhosted.org/packages/6f/75/3fa09aa5cf6ed04bee3fa575798ddf1ce0bace8edb47249c798077a81f7f/pillow-12.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:26d9f7d2b604cd23aba3e9faf795787456ac25634d82cd060556998e39c6fa47", size = 2437834, upload-time = "2025-10-15T18:23:08.194Z" },
- { url = "https://files.pythonhosted.org/packages/54/2a/9a8c6ba2c2c07b71bec92cf63e03370ca5e5f5c5b119b742bcc0cde3f9c5/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:beeae3f27f62308f1ddbcfb0690bf44b10732f2ef43758f169d5e9303165d3f9", size = 4045531, upload-time = "2025-10-15T18:23:10.121Z" },
- { url = "https://files.pythonhosted.org/packages/84/54/836fdbf1bfb3d66a59f0189ff0b9f5f666cee09c6188309300df04ad71fa/pillow-12.0.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:d4827615da15cd59784ce39d3388275ec093ae3ee8d7f0c089b76fa87af756c2", size = 4120554, upload-time = "2025-10-15T18:23:12.14Z" },
- { url = "https://files.pythonhosted.org/packages/0d/cd/16aec9f0da4793e98e6b54778a5fbce4f375c6646fe662e80600b8797379/pillow-12.0.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:3e42edad50b6909089750e65c91aa09aaf1e0a71310d383f11321b27c224ed8a", size = 3576812, upload-time = "2025-10-15T18:23:13.962Z" },
- { url = "https://files.pythonhosted.org/packages/f6/b7/13957fda356dc46339298b351cae0d327704986337c3c69bb54628c88155/pillow-12.0.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e5d8efac84c9afcb40914ab49ba063d94f5dbdf5066db4482c66a992f47a3a3b", size = 5252689, upload-time = "2025-10-15T18:23:15.562Z" },
- { url = "https://files.pythonhosted.org/packages/fc/f5/eae31a306341d8f331f43edb2e9122c7661b975433de5e447939ae61c5da/pillow-12.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:266cd5f2b63ff316d5a1bba46268e603c9caf5606d44f38c2873c380950576ad", size = 4650186, upload-time = "2025-10-15T18:23:17.379Z" },
- { url = "https://files.pythonhosted.org/packages/86/62/2a88339aa40c4c77e79108facbd307d6091e2c0eb5b8d3cf4977cfca2fe6/pillow-12.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:58eea5ebe51504057dd95c5b77d21700b77615ab0243d8152793dc00eb4faf01", size = 6230308, upload-time = "2025-10-15T18:23:18.971Z" },
- { url = "https://files.pythonhosted.org/packages/c7/33/5425a8992bcb32d1cb9fa3dd39a89e613d09a22f2c8083b7bf43c455f760/pillow-12.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f13711b1a5ba512d647a0e4ba79280d3a9a045aaf7e0cc6fbe96b91d4cdf6b0c", size = 8039222, upload-time = "2025-10-15T18:23:20.909Z" },
- { url = "https://files.pythonhosted.org/packages/d8/61/3f5d3b35c5728f37953d3eec5b5f3e77111949523bd2dd7f31a851e50690/pillow-12.0.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6846bd2d116ff42cba6b646edf5bf61d37e5cbd256425fa089fee4ff5c07a99e", size = 6346657, upload-time = "2025-10-15T18:23:23.077Z" },
- { url = "https://files.pythonhosted.org/packages/3a/be/ee90a3d79271227e0f0a33c453531efd6ed14b2e708596ba5dd9be948da3/pillow-12.0.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c98fa880d695de164b4135a52fd2e9cd7b7c90a9d8ac5e9e443a24a95ef9248e", size = 7038482, upload-time = "2025-10-15T18:23:25.005Z" },
- { url = "https://files.pythonhosted.org/packages/44/34/a16b6a4d1ad727de390e9bd9f19f5f669e079e5826ec0f329010ddea492f/pillow-12.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa3ed2a29a9e9d2d488b4da81dcb54720ac3104a20bf0bd273f1e4648aff5af9", size = 6461416, upload-time = "2025-10-15T18:23:27.009Z" },
- { url = "https://files.pythonhosted.org/packages/b6/39/1aa5850d2ade7d7ba9f54e4e4c17077244ff7a2d9e25998c38a29749eb3f/pillow-12.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d034140032870024e6b9892c692fe2968493790dd57208b2c37e3fb35f6df3ab", size = 7131584, upload-time = "2025-10-15T18:23:29.752Z" },
- { url = "https://files.pythonhosted.org/packages/bf/db/4fae862f8fad0167073a7733973bfa955f47e2cac3dc3e3e6257d10fab4a/pillow-12.0.0-cp314-cp314-win32.whl", hash = "sha256:1b1b133e6e16105f524a8dec491e0586d072948ce15c9b914e41cdadd209052b", size = 6400621, upload-time = "2025-10-15T18:23:32.06Z" },
- { url = "https://files.pythonhosted.org/packages/2b/24/b350c31543fb0107ab2599464d7e28e6f856027aadda995022e695313d94/pillow-12.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:8dc232e39d409036af549c86f24aed8273a40ffa459981146829a324e0848b4b", size = 7142916, upload-time = "2025-10-15T18:23:34.71Z" },
- { url = "https://files.pythonhosted.org/packages/0f/9b/0ba5a6fd9351793996ef7487c4fdbde8d3f5f75dbedc093bb598648fddf0/pillow-12.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:d52610d51e265a51518692045e372a4c363056130d922a7351429ac9f27e70b0", size = 2523836, upload-time = "2025-10-15T18:23:36.967Z" },
- { url = "https://files.pythonhosted.org/packages/f5/7a/ceee0840aebc579af529b523d530840338ecf63992395842e54edc805987/pillow-12.0.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1979f4566bb96c1e50a62d9831e2ea2d1211761e5662afc545fa766f996632f6", size = 5255092, upload-time = "2025-10-15T18:23:38.573Z" },
- { url = "https://files.pythonhosted.org/packages/44/76/20776057b4bfd1aef4eeca992ebde0f53a4dce874f3ae693d0ec90a4f79b/pillow-12.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b2e4b27a6e15b04832fe9bf292b94b5ca156016bbc1ea9c2c20098a0320d6cf6", size = 4653158, upload-time = "2025-10-15T18:23:40.238Z" },
- { url = "https://files.pythonhosted.org/packages/82/3f/d9ff92ace07be8836b4e7e87e6a4c7a8318d47c2f1463ffcf121fc57d9cb/pillow-12.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fb3096c30df99fd01c7bf8e544f392103d0795b9f98ba71a8054bcbf56b255f1", size = 6267882, upload-time = "2025-10-15T18:23:42.434Z" },
- { url = "https://files.pythonhosted.org/packages/9f/7a/4f7ff87f00d3ad33ba21af78bfcd2f032107710baf8280e3722ceec28cda/pillow-12.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7438839e9e053ef79f7112c881cef684013855016f928b168b81ed5835f3e75e", size = 8071001, upload-time = "2025-10-15T18:23:44.29Z" },
- { url = "https://files.pythonhosted.org/packages/75/87/fcea108944a52dad8cca0715ae6247e271eb80459364a98518f1e4f480c1/pillow-12.0.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d5c411a8eaa2299322b647cd932586b1427367fd3184ffbb8f7a219ea2041ca", size = 6380146, upload-time = "2025-10-15T18:23:46.065Z" },
- { url = "https://files.pythonhosted.org/packages/91/52/0d31b5e571ef5fd111d2978b84603fce26aba1b6092f28e941cb46570745/pillow-12.0.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7e091d464ac59d2c7ad8e7e08105eaf9dafbc3883fd7265ffccc2baad6ac925", size = 7067344, upload-time = "2025-10-15T18:23:47.898Z" },
- { url = "https://files.pythonhosted.org/packages/7b/f4/2dd3d721f875f928d48e83bb30a434dee75a2531bca839bb996bb0aa5a91/pillow-12.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:792a2c0be4dcc18af9d4a2dfd8a11a17d5e25274a1062b0ec1c2d79c76f3e7f8", size = 6491864, upload-time = "2025-10-15T18:23:49.607Z" },
- { url = "https://files.pythonhosted.org/packages/30/4b/667dfcf3d61fc309ba5a15b141845cece5915e39b99c1ceab0f34bf1d124/pillow-12.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:afbefa430092f71a9593a99ab6a4e7538bc9eabbf7bf94f91510d3503943edc4", size = 7158911, upload-time = "2025-10-15T18:23:51.351Z" },
- { url = "https://files.pythonhosted.org/packages/a2/2f/16cabcc6426c32218ace36bf0d55955e813f2958afddbf1d391849fee9d1/pillow-12.0.0-cp314-cp314t-win32.whl", hash = "sha256:3830c769decf88f1289680a59d4f4c46c72573446352e2befec9a8512104fa52", size = 6408045, upload-time = "2025-10-15T18:23:53.177Z" },
- { url = "https://files.pythonhosted.org/packages/35/73/e29aa0c9c666cf787628d3f0dcf379f4791fba79f4936d02f8b37165bdf8/pillow-12.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:905b0365b210c73afb0ebe9101a32572152dfd1c144c7e28968a331b9217b94a", size = 7148282, upload-time = "2025-10-15T18:23:55.316Z" },
- { url = "https://files.pythonhosted.org/packages/c1/70/6b41bdcddf541b437bbb9f47f94d2db5d9ddef6c37ccab8c9107743748a4/pillow-12.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:99353a06902c2e43b43e8ff74ee65a7d90307d82370604746738a1e0661ccca7", size = 2525630, upload-time = "2025-10-15T18:23:57.149Z" },
- { url = "https://files.pythonhosted.org/packages/1d/b3/582327e6c9f86d037b63beebe981425d6811104cb443e8193824ef1a2f27/pillow-12.0.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b22bd8c974942477156be55a768f7aa37c46904c175be4e158b6a86e3a6b7ca8", size = 5215068, upload-time = "2025-10-15T18:23:59.594Z" },
- { url = "https://files.pythonhosted.org/packages/fd/d6/67748211d119f3b6540baf90f92fae73ae51d5217b171b0e8b5f7e5d558f/pillow-12.0.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:805ebf596939e48dbb2e4922a1d3852cfc25c38160751ce02da93058b48d252a", size = 4614994, upload-time = "2025-10-15T18:24:01.669Z" },
- { url = "https://files.pythonhosted.org/packages/2d/e1/f8281e5d844c41872b273b9f2c34a4bf64ca08905668c8ae730eedc7c9fa/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cae81479f77420d217def5f54b5b9d279804d17e982e0f2fa19b1d1e14ab5197", size = 5246639, upload-time = "2025-10-15T18:24:03.403Z" },
- { url = "https://files.pythonhosted.org/packages/94/5a/0d8ab8ffe8a102ff5df60d0de5af309015163bf710c7bb3e8311dd3b3ad0/pillow-12.0.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aeaefa96c768fc66818730b952a862235d68825c178f1b3ffd4efd7ad2edcb7c", size = 6986839, upload-time = "2025-10-15T18:24:05.344Z" },
- { url = "https://files.pythonhosted.org/packages/20/2e/3434380e8110b76cd9eb00a363c484b050f949b4bbe84ba770bb8508a02c/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:09f2d0abef9e4e2f349305a4f8cc784a8a6c2f58a8c4892eea13b10a943bd26e", size = 5313505, upload-time = "2025-10-15T18:24:07.137Z" },
- { url = "https://files.pythonhosted.org/packages/57/ca/5a9d38900d9d74785141d6580950fe705de68af735ff6e727cb911b64740/pillow-12.0.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bdee52571a343d721fb2eb3b090a82d959ff37fc631e3f70422e0c2e029f3e76", size = 5963654, upload-time = "2025-10-15T18:24:09.579Z" },
- { url = "https://files.pythonhosted.org/packages/95/7e/f896623c3c635a90537ac093c6a618ebe1a90d87206e42309cb5d98a1b9e/pillow-12.0.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:b290fd8aa38422444d4b50d579de197557f182ef1068b75f5aa8558638b8d0a5", size = 6997850, upload-time = "2025-10-15T18:24:11.495Z" },
+version = "12.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d0/02/d52c733a2452ef1ffcc123b68e6606d07276b0e358db70eabad7e40042b7/pillow-12.1.0.tar.gz", hash = "sha256:5c5ae0a06e9ea030ab786b0251b32c7e4ce10e58d983c0d5c56029455180b5b9", size = 46977283, upload-time = "2026-01-02T09:13:29.892Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/43/c4/bf8328039de6cc22182c3ef007a2abfbbdab153661c0a9aa78af8d706391/pillow-12.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:a83e0850cb8f5ac975291ebfc4170ba481f41a28065277f7f735c202cd8e0af3", size = 5304057, upload-time = "2026-01-02T09:10:46.627Z" },
+ { url = "https://files.pythonhosted.org/packages/43/06/7264c0597e676104cc22ca73ee48f752767cd4b1fe084662620b17e10120/pillow-12.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b6e53e82ec2db0717eabb276aa56cf4e500c9a7cec2c2e189b55c24f65a3e8c0", size = 4657811, upload-time = "2026-01-02T09:10:49.548Z" },
+ { url = "https://files.pythonhosted.org/packages/72/64/f9189e44474610daf83da31145fa56710b627b5c4c0b9c235e34058f6b31/pillow-12.1.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:40a8e3b9e8773876d6e30daed22f016509e3987bab61b3b7fe309d7019a87451", size = 6232243, upload-time = "2026-01-02T09:10:51.62Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/30/0df458009be6a4caca4ca2c52975e6275c387d4e5c95544e34138b41dc86/pillow-12.1.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:800429ac32c9b72909c671aaf17ecd13110f823ddb7db4dfef412a5587c2c24e", size = 8037872, upload-time = "2026-01-02T09:10:53.446Z" },
+ { url = "https://files.pythonhosted.org/packages/e4/86/95845d4eda4f4f9557e25381d70876aa213560243ac1a6d619c46caaedd9/pillow-12.1.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0b022eaaf709541b391ee069f0022ee5b36c709df71986e3f7be312e46f42c84", size = 6345398, upload-time = "2026-01-02T09:10:55.426Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/1f/8e66ab9be3aaf1435bc03edd1ebdf58ffcd17f7349c1d970cafe87af27d9/pillow-12.1.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f345e7bc9d7f368887c712aa5054558bad44d2a301ddf9248599f4161abc7c0", size = 7034667, upload-time = "2026-01-02T09:10:57.11Z" },
+ { url = "https://files.pythonhosted.org/packages/f9/f6/683b83cb9b1db1fb52b87951b1c0b99bdcfceaa75febf11406c19f82cb5e/pillow-12.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d70347c8a5b7ccd803ec0c85c8709f036e6348f1e6a5bf048ecd9c64d3550b8b", size = 6458743, upload-time = "2026-01-02T09:10:59.331Z" },
+ { url = "https://files.pythonhosted.org/packages/9a/7d/de833d63622538c1d58ce5395e7c6cb7e7dce80decdd8bde4a484e095d9f/pillow-12.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fcc52d86ce7a34fd17cb04e87cfdb164648a3662a6f20565910a99653d66c18", size = 7159342, upload-time = "2026-01-02T09:11:01.82Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/40/50d86571c9e5868c42b81fe7da0c76ca26373f3b95a8dd675425f4a92ec1/pillow-12.1.0-cp311-cp311-win32.whl", hash = "sha256:3ffaa2f0659e2f740473bcf03c702c39a8d4b2b7ffc629052028764324842c64", size = 6328655, upload-time = "2026-01-02T09:11:04.556Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/af/b1d7e301c4cd26cd45d4af884d9ee9b6fab893b0ad2450d4746d74a6968c/pillow-12.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:806f3987ffe10e867bab0ddad45df1148a2b98221798457fa097ad85d6e8bc75", size = 7031469, upload-time = "2026-01-02T09:11:06.538Z" },
+ { url = "https://files.pythonhosted.org/packages/48/36/d5716586d887fb2a810a4a61518a327a1e21c8b7134c89283af272efe84b/pillow-12.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:9f5fefaca968e700ad1a4a9de98bf0869a94e397fe3524c4c9450c1445252304", size = 2452515, upload-time = "2026-01-02T09:11:08.226Z" },
+ { url = "https://files.pythonhosted.org/packages/20/31/dc53fe21a2f2996e1b7d92bf671cdb157079385183ef7c1ae08b485db510/pillow-12.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a332ac4ccb84b6dde65dbace8431f3af08874bf9770719d32a635c4ef411b18b", size = 5262642, upload-time = "2026-01-02T09:11:10.138Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/c1/10e45ac9cc79419cedf5121b42dcca5a50ad2b601fa080f58c22fb27626e/pillow-12.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:907bfa8a9cb790748a9aa4513e37c88c59660da3bcfffbd24a7d9e6abf224551", size = 4657464, upload-time = "2026-01-02T09:11:12.319Z" },
+ { url = "https://files.pythonhosted.org/packages/ad/26/7b82c0ab7ef40ebede7a97c72d473bda5950f609f8e0c77b04af574a0ddb/pillow-12.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:efdc140e7b63b8f739d09a99033aa430accce485ff78e6d311973a67b6bf3208", size = 6234878, upload-time = "2026-01-02T09:11:14.096Z" },
+ { url = "https://files.pythonhosted.org/packages/76/25/27abc9792615b5e886ca9411ba6637b675f1b77af3104710ac7353fe5605/pillow-12.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bef9768cab184e7ae6e559c032e95ba8d07b3023c289f79a2bd36e8bf85605a5", size = 8044868, upload-time = "2026-01-02T09:11:15.903Z" },
+ { url = "https://files.pythonhosted.org/packages/0a/ea/f200a4c36d836100e7bc738fc48cd963d3ba6372ebc8298a889e0cfc3359/pillow-12.1.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:742aea052cf5ab5034a53c3846165bc3ce88d7c38e954120db0ab867ca242661", size = 6349468, upload-time = "2026-01-02T09:11:17.631Z" },
+ { url = "https://files.pythonhosted.org/packages/11/8f/48d0b77ab2200374c66d344459b8958c86693be99526450e7aee714e03e4/pillow-12.1.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a6dfc2af5b082b635af6e08e0d1f9f1c4e04d17d4e2ca0ef96131e85eda6eb17", size = 7041518, upload-time = "2026-01-02T09:11:19.389Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/23/c281182eb986b5d31f0a76d2a2c8cd41722d6fb8ed07521e802f9bba52de/pillow-12.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:609e89d9f90b581c8d16358c9087df76024cf058fa693dd3e1e1620823f39670", size = 6462829, upload-time = "2026-01-02T09:11:21.28Z" },
+ { url = "https://files.pythonhosted.org/packages/25/ef/7018273e0faac099d7b00982abdcc39142ae6f3bd9ceb06de09779c4a9d6/pillow-12.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:43b4899cfd091a9693a1278c4982f3e50f7fb7cff5153b05174b4afc9593b616", size = 7166756, upload-time = "2026-01-02T09:11:23.559Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/c8/993d4b7ab2e341fe02ceef9576afcf5830cdec640be2ac5bee1820d693d4/pillow-12.1.0-cp312-cp312-win32.whl", hash = "sha256:aa0c9cc0b82b14766a99fbe6084409972266e82f459821cd26997a488a7261a7", size = 6328770, upload-time = "2026-01-02T09:11:25.661Z" },
+ { url = "https://files.pythonhosted.org/packages/a7/87/90b358775a3f02765d87655237229ba64a997b87efa8ccaca7dd3e36e7a7/pillow-12.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:d70534cea9e7966169ad29a903b99fc507e932069a881d0965a1a84bb57f6c6d", size = 7033406, upload-time = "2026-01-02T09:11:27.474Z" },
+ { url = "https://files.pythonhosted.org/packages/5d/cf/881b457eccacac9e5b2ddd97d5071fb6d668307c57cbf4e3b5278e06e536/pillow-12.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:65b80c1ee7e14a87d6a068dd3b0aea268ffcabfe0498d38661b00c5b4b22e74c", size = 2452612, upload-time = "2026-01-02T09:11:29.309Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/c7/2530a4aa28248623e9d7f27316b42e27c32ec410f695929696f2e0e4a778/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:7b5dd7cbae20285cdb597b10eb5a2c13aa9de6cde9bb64a3c1317427b1db1ae1", size = 4062543, upload-time = "2026-01-02T09:11:31.566Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/1f/40b8eae823dc1519b87d53c30ed9ef085506b05281d313031755c1705f73/pillow-12.1.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:29a4cef9cb672363926f0470afc516dbf7305a14d8c54f7abbb5c199cd8f8179", size = 4138373, upload-time = "2026-01-02T09:11:33.367Z" },
+ { url = "https://files.pythonhosted.org/packages/d4/77/6fa60634cf06e52139fd0e89e5bbf055e8166c691c42fb162818b7fda31d/pillow-12.1.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:681088909d7e8fa9e31b9799aaa59ba5234c58e5e4f1951b4c4d1082a2e980e0", size = 3601241, upload-time = "2026-01-02T09:11:35.011Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/bf/28ab865de622e14b747f0cd7877510848252d950e43002e224fb1c9ababf/pillow-12.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:983976c2ab753166dc66d36af6e8ec15bb511e4a25856e2227e5f7e00a160587", size = 5262410, upload-time = "2026-01-02T09:11:36.682Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/34/583420a1b55e715937a85bd48c5c0991598247a1fd2eb5423188e765ea02/pillow-12.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:db44d5c160a90df2d24a24760bbd37607d53da0b34fb546c4c232af7192298ac", size = 4657312, upload-time = "2026-01-02T09:11:38.535Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/fd/f5a0896839762885b3376ff04878f86ab2b097c2f9a9cdccf4eda8ba8dc0/pillow-12.1.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6b7a9d1db5dad90e2991645874f708e87d9a3c370c243c2d7684d28f7e133e6b", size = 6232605, upload-time = "2026-01-02T09:11:40.602Z" },
+ { url = "https://files.pythonhosted.org/packages/98/aa/938a09d127ac1e70e6ed467bd03834350b33ef646b31edb7452d5de43792/pillow-12.1.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6258f3260986990ba2fa8a874f8b6e808cf5abb51a94015ca3dc3c68aa4f30ea", size = 8041617, upload-time = "2026-01-02T09:11:42.721Z" },
+ { url = "https://files.pythonhosted.org/packages/17/e8/538b24cb426ac0186e03f80f78bc8dc7246c667f58b540bdd57c71c9f79d/pillow-12.1.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e115c15e3bc727b1ca3e641a909f77f8ca72a64fff150f666fcc85e57701c26c", size = 6346509, upload-time = "2026-01-02T09:11:44.955Z" },
+ { url = "https://files.pythonhosted.org/packages/01/9a/632e58ec89a32738cabfd9ec418f0e9898a2b4719afc581f07c04a05e3c9/pillow-12.1.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6741e6f3074a35e47c77b23a4e4f2d90db3ed905cb1c5e6e0d49bff2045632bc", size = 7038117, upload-time = "2026-01-02T09:11:46.736Z" },
+ { url = "https://files.pythonhosted.org/packages/c7/a2/d40308cf86eada842ca1f3ffa45d0ca0df7e4ab33c83f81e73f5eaed136d/pillow-12.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:935b9d1aed48fcfb3f838caac506f38e29621b44ccc4f8a64d575cb1b2a88644", size = 6460151, upload-time = "2026-01-02T09:11:48.625Z" },
+ { url = "https://files.pythonhosted.org/packages/f1/88/f5b058ad6453a085c5266660a1417bdad590199da1b32fb4efcff9d33b05/pillow-12.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5fee4c04aad8932da9f8f710af2c1a15a83582cfb884152a9caa79d4efcdbf9c", size = 7164534, upload-time = "2026-01-02T09:11:50.445Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ce/c17334caea1db789163b5d855a5735e47995b0b5dc8745e9a3605d5f24c0/pillow-12.1.0-cp313-cp313-win32.whl", hash = "sha256:a786bf667724d84aa29b5db1c61b7bfdde380202aaca12c3461afd6b71743171", size = 6332551, upload-time = "2026-01-02T09:11:52.234Z" },
+ { url = "https://files.pythonhosted.org/packages/e5/07/74a9d941fa45c90a0d9465098fe1ec85de3e2afbdc15cc4766622d516056/pillow-12.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:461f9dfdafa394c59cd6d818bdfdbab4028b83b02caadaff0ffd433faf4c9a7a", size = 7040087, upload-time = "2026-01-02T09:11:54.822Z" },
+ { url = "https://files.pythonhosted.org/packages/88/09/c99950c075a0e9053d8e880595926302575bc742b1b47fe1bbcc8d388d50/pillow-12.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:9212d6b86917a2300669511ed094a9406888362e085f2431a7da985a6b124f45", size = 2452470, upload-time = "2026-01-02T09:11:56.522Z" },
+ { url = "https://files.pythonhosted.org/packages/b5/ba/970b7d85ba01f348dee4d65412476321d40ee04dcb51cd3735b9dc94eb58/pillow-12.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:00162e9ca6d22b7c3ee8e61faa3c3253cd19b6a37f126cad04f2f88b306f557d", size = 5264816, upload-time = "2026-01-02T09:11:58.227Z" },
+ { url = "https://files.pythonhosted.org/packages/10/60/650f2fb55fdba7a510d836202aa52f0baac633e50ab1cf18415d332188fb/pillow-12.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:7d6daa89a00b58c37cb1747ec9fb7ac3bc5ffd5949f5888657dfddde6d1312e0", size = 4660472, upload-time = "2026-01-02T09:12:00.798Z" },
+ { url = "https://files.pythonhosted.org/packages/2b/c0/5273a99478956a099d533c4f46cbaa19fd69d606624f4334b85e50987a08/pillow-12.1.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e2479c7f02f9d505682dc47df8c0ea1fc5e264c4d1629a5d63fe3e2334b89554", size = 6268974, upload-time = "2026-01-02T09:12:02.572Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/26/0bf714bc2e73d5267887d47931d53c4ceeceea6978148ed2ab2a4e6463c4/pillow-12.1.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f188d580bd870cda1e15183790d1cc2fa78f666e76077d103edf048eed9c356e", size = 8073070, upload-time = "2026-01-02T09:12:04.75Z" },
+ { url = "https://files.pythonhosted.org/packages/43/cf/1ea826200de111a9d65724c54f927f3111dc5ae297f294b370a670c17786/pillow-12.1.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0fde7ec5538ab5095cc02df38ee99b0443ff0e1c847a045554cf5f9af1f4aa82", size = 6380176, upload-time = "2026-01-02T09:12:06.626Z" },
+ { url = "https://files.pythonhosted.org/packages/03/e0/7938dd2b2013373fd85d96e0f38d62b7a5a262af21ac274250c7ca7847c9/pillow-12.1.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0ed07dca4a8464bada6139ab38f5382f83e5f111698caf3191cb8dbf27d908b4", size = 7067061, upload-time = "2026-01-02T09:12:08.624Z" },
+ { url = "https://files.pythonhosted.org/packages/86/ad/a2aa97d37272a929a98437a8c0ac37b3cf012f4f8721e1bd5154699b2518/pillow-12.1.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:f45bd71d1fa5e5749587613037b172e0b3b23159d1c00ef2fc920da6f470e6f0", size = 6491824, upload-time = "2026-01-02T09:12:10.488Z" },
+ { url = "https://files.pythonhosted.org/packages/a4/44/80e46611b288d51b115826f136fb3465653c28f491068a72d3da49b54cd4/pillow-12.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:277518bf4fe74aa91489e1b20577473b19ee70fb97c374aa50830b279f25841b", size = 7190911, upload-time = "2026-01-02T09:12:12.772Z" },
+ { url = "https://files.pythonhosted.org/packages/86/77/eacc62356b4cf81abe99ff9dbc7402750044aed02cfd6a503f7c6fc11f3e/pillow-12.1.0-cp313-cp313t-win32.whl", hash = "sha256:7315f9137087c4e0ee73a761b163fc9aa3b19f5f606a7fc08d83fd3e4379af65", size = 6336445, upload-time = "2026-01-02T09:12:14.775Z" },
+ { url = "https://files.pythonhosted.org/packages/e7/3c/57d81d0b74d218706dafccb87a87ea44262c43eef98eb3b164fd000e0491/pillow-12.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:0ddedfaa8b5f0b4ffbc2fa87b556dc59f6bb4ecb14a53b33f9189713ae8053c0", size = 7045354, upload-time = "2026-01-02T09:12:16.599Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/82/8b9b97bba2e3576a340f93b044a3a3a09841170ab4c1eb0d5c93469fd32f/pillow-12.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:80941e6d573197a0c28f394753de529bb436b1ca990ed6e765cf42426abc39f8", size = 2454547, upload-time = "2026-01-02T09:12:18.704Z" },
+ { url = "https://files.pythonhosted.org/packages/8c/87/bdf971d8bbcf80a348cc3bacfcb239f5882100fe80534b0ce67a784181d8/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:5cb7bc1966d031aec37ddb9dcf15c2da5b2e9f7cc3ca7c54473a20a927e1eb91", size = 4062533, upload-time = "2026-01-02T09:12:20.791Z" },
+ { url = "https://files.pythonhosted.org/packages/ff/4f/5eb37a681c68d605eb7034c004875c81f86ec9ef51f5be4a63eadd58859a/pillow-12.1.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:97e9993d5ed946aba26baf9c1e8cf18adbab584b99f452ee72f7ee8acb882796", size = 4138546, upload-time = "2026-01-02T09:12:23.664Z" },
+ { url = "https://files.pythonhosted.org/packages/11/6d/19a95acb2edbace40dcd582d077b991646b7083c41b98da4ed7555b59733/pillow-12.1.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:414b9a78e14ffeb98128863314e62c3f24b8a86081066625700b7985b3f529bd", size = 3601163, upload-time = "2026-01-02T09:12:26.338Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/36/2b8138e51cb42e4cc39c3297713455548be855a50558c3ac2beebdc251dd/pillow-12.1.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:e6bdb408f7c9dd2a5ff2b14a3b0bb6d4deb29fb9961e6eb3ae2031ae9a5cec13", size = 5266086, upload-time = "2026-01-02T09:12:28.782Z" },
+ { url = "https://files.pythonhosted.org/packages/53/4b/649056e4d22e1caa90816bf99cef0884aed607ed38075bd75f091a607a38/pillow-12.1.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3413c2ae377550f5487991d444428f1a8ae92784aac79caa8b1e3b89b175f77e", size = 4657344, upload-time = "2026-01-02T09:12:31.117Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/6b/c5742cea0f1ade0cd61485dc3d81f05261fc2276f537fbdc00802de56779/pillow-12.1.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:e5dcbe95016e88437ecf33544ba5db21ef1b8dd6e1b434a2cb2a3d605299e643", size = 6232114, upload-time = "2026-01-02T09:12:32.936Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/8f/9f521268ce22d63991601aafd3d48d5ff7280a246a1ef62d626d67b44064/pillow-12.1.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d0a7735df32ccbcc98b98a1ac785cc4b19b580be1bdf0aeb5c03223220ea09d5", size = 8042708, upload-time = "2026-01-02T09:12:34.78Z" },
+ { url = "https://files.pythonhosted.org/packages/1a/eb/257f38542893f021502a1bbe0c2e883c90b5cff26cc33b1584a841a06d30/pillow-12.1.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c27407a2d1b96774cbc4a7594129cc027339fd800cd081e44497722ea1179de", size = 6347762, upload-time = "2026-01-02T09:12:36.748Z" },
+ { url = "https://files.pythonhosted.org/packages/c4/5a/8ba375025701c09b309e8d5163c5a4ce0102fa86bbf8800eb0d7ac87bc51/pillow-12.1.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:15c794d74303828eaa957ff8070846d0efe8c630901a1c753fdc63850e19ecd9", size = 7039265, upload-time = "2026-01-02T09:12:39.082Z" },
+ { url = "https://files.pythonhosted.org/packages/cf/dc/cf5e4cdb3db533f539e88a7bbf9f190c64ab8a08a9bc7a4ccf55067872e4/pillow-12.1.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c990547452ee2800d8506c4150280757f88532f3de2a58e3022e9b179107862a", size = 6462341, upload-time = "2026-01-02T09:12:40.946Z" },
+ { url = "https://files.pythonhosted.org/packages/d0/47/0291a25ac9550677e22eda48510cfc4fa4b2ef0396448b7fbdc0a6946309/pillow-12.1.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b63e13dd27da389ed9475b3d28510f0f954bca0041e8e551b2a4eb1eab56a39a", size = 7165395, upload-time = "2026-01-02T09:12:42.706Z" },
+ { url = "https://files.pythonhosted.org/packages/4f/4c/e005a59393ec4d9416be06e6b45820403bb946a778e39ecec62f5b2b991e/pillow-12.1.0-cp314-cp314-win32.whl", hash = "sha256:1a949604f73eb07a8adab38c4fe50791f9919344398bdc8ac6b307f755fc7030", size = 6431413, upload-time = "2026-01-02T09:12:44.944Z" },
+ { url = "https://files.pythonhosted.org/packages/1c/af/f23697f587ac5f9095d67e31b81c95c0249cd461a9798a061ed6709b09b5/pillow-12.1.0-cp314-cp314-win_amd64.whl", hash = "sha256:4f9f6a650743f0ddee5593ac9e954ba1bdbc5e150bc066586d4f26127853ab94", size = 7176779, upload-time = "2026-01-02T09:12:46.727Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/36/6a51abf8599232f3e9afbd16d52829376a68909fe14efe29084445db4b73/pillow-12.1.0-cp314-cp314-win_arm64.whl", hash = "sha256:808b99604f7873c800c4840f55ff389936ef1948e4e87645eaf3fccbc8477ac4", size = 2543105, upload-time = "2026-01-02T09:12:49.243Z" },
+ { url = "https://files.pythonhosted.org/packages/82/54/2e1dd20c8749ff225080d6ba465a0cab4387f5db0d1c5fb1439e2d99923f/pillow-12.1.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:bc11908616c8a283cf7d664f77411a5ed2a02009b0097ff8abbba5e79128ccf2", size = 5268571, upload-time = "2026-01-02T09:12:51.11Z" },
+ { url = "https://files.pythonhosted.org/packages/57/61/571163a5ef86ec0cf30d265ac2a70ae6fc9e28413d1dc94fa37fae6bda89/pillow-12.1.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:896866d2d436563fa2a43a9d72f417874f16b5545955c54a64941e87c1376c61", size = 4660426, upload-time = "2026-01-02T09:12:52.865Z" },
+ { url = "https://files.pythonhosted.org/packages/5e/e1/53ee5163f794aef1bf84243f755ee6897a92c708505350dd1923f4afec48/pillow-12.1.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8e178e3e99d3c0ea8fc64b88447f7cac8ccf058af422a6cedc690d0eadd98c51", size = 6269908, upload-time = "2026-01-02T09:12:54.884Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/0b/b4b4106ff0ee1afa1dc599fde6ab230417f800279745124f6c50bcffed8e/pillow-12.1.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:079af2fb0c599c2ec144ba2c02766d1b55498e373b3ac64687e43849fbbef5bc", size = 8074733, upload-time = "2026-01-02T09:12:56.802Z" },
+ { url = "https://files.pythonhosted.org/packages/19/9f/80b411cbac4a732439e629a26ad3ef11907a8c7fc5377b7602f04f6fe4e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdec5e43377761c5dbca620efb69a77f6855c5a379e32ac5b158f54c84212b14", size = 6381431, upload-time = "2026-01-02T09:12:58.823Z" },
+ { url = "https://files.pythonhosted.org/packages/8f/b7/d65c45db463b66ecb6abc17c6ba6917a911202a07662247e1355ce1789e7/pillow-12.1.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:565c986f4b45c020f5421a4cea13ef294dde9509a8577f29b2fc5edc7587fff8", size = 7068529, upload-time = "2026-01-02T09:13:00.885Z" },
+ { url = "https://files.pythonhosted.org/packages/50/96/dfd4cd726b4a45ae6e3c669fc9e49deb2241312605d33aba50499e9d9bd1/pillow-12.1.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:43aca0a55ce1eefc0aefa6253661cb54571857b1a7b2964bd8a1e3ef4b729924", size = 6492981, upload-time = "2026-01-02T09:13:03.314Z" },
+ { url = "https://files.pythonhosted.org/packages/4d/1c/b5dc52cf713ae46033359c5ca920444f18a6359ce1020dd3e9c553ea5bc6/pillow-12.1.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:0deedf2ea233722476b3a81e8cdfbad786f7adbed5d848469fa59fe52396e4ef", size = 7191878, upload-time = "2026-01-02T09:13:05.276Z" },
+ { url = "https://files.pythonhosted.org/packages/53/26/c4188248bd5edaf543864fe4834aebe9c9cb4968b6f573ce014cc42d0720/pillow-12.1.0-cp314-cp314t-win32.whl", hash = "sha256:b17fbdbe01c196e7e159aacb889e091f28e61020a8abeac07b68079b6e626988", size = 6438703, upload-time = "2026-01-02T09:13:07.491Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/0e/69ed296de8ea05cb03ee139cee600f424ca166e632567b2d66727f08c7ed/pillow-12.1.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27b9baecb428899db6c0de572d6d305cfaf38ca1596b5c0542a5182e3e74e8c6", size = 7182927, upload-time = "2026-01-02T09:13:09.841Z" },
+ { url = "https://files.pythonhosted.org/packages/fc/f5/68334c015eed9b5cff77814258717dec591ded209ab5b6fb70e2ae873d1d/pillow-12.1.0-cp314-cp314t-win_arm64.whl", hash = "sha256:f61333d817698bdcdd0f9d7793e365ac3d2a21c1f1eb02b32ad6aefb8d8ea831", size = 2545104, upload-time = "2026-01-02T09:13:12.068Z" },
+ { url = "https://files.pythonhosted.org/packages/8b/bc/224b1d98cffd7164b14707c91aac83c07b047fbd8f58eba4066a3e53746a/pillow-12.1.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:ca94b6aac0d7af2a10ba08c0f888b3d5114439b6b3ef39968378723622fed377", size = 5228605, upload-time = "2026-01-02T09:13:14.084Z" },
+ { url = "https://files.pythonhosted.org/packages/0c/ca/49ca7769c4550107de049ed85208240ba0f330b3f2e316f24534795702ce/pillow-12.1.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:351889afef0f485b84078ea40fe33727a0492b9af3904661b0abbafee0355b72", size = 4622245, upload-time = "2026-01-02T09:13:15.964Z" },
+ { url = "https://files.pythonhosted.org/packages/73/48/fac807ce82e5955bcc2718642b94b1bd22a82a6d452aea31cbb678cddf12/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb0984b30e973f7e2884362b7d23d0a348c7143ee559f38ef3eaab640144204c", size = 5247593, upload-time = "2026-01-02T09:13:17.913Z" },
+ { url = "https://files.pythonhosted.org/packages/d2/95/3e0742fe358c4664aed4fd05d5f5373dcdad0b27af52aa0972568541e3f4/pillow-12.1.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:84cabc7095dd535ca934d57e9ce2a72ffd216e435a84acb06b2277b1de2689bd", size = 6989008, upload-time = "2026-01-02T09:13:20.083Z" },
+ { url = "https://files.pythonhosted.org/packages/5a/74/fe2ac378e4e202e56d50540d92e1ef4ff34ed687f3c60f6a121bcf99437e/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53d8b764726d3af1a138dd353116f774e3862ec7e3794e0c8781e30db0f35dfc", size = 5313824, upload-time = "2026-01-02T09:13:22.405Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/77/2a60dee1adee4e2655ac328dd05c02a955c1cd683b9f1b82ec3feb44727c/pillow-12.1.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5da841d81b1a05ef940a8567da92decaa15bc4d7dedb540a8c219ad83d91808a", size = 5963278, upload-time = "2026-01-02T09:13:24.706Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/71/64e9b1c7f04ae0027f788a248e6297d7fcc29571371fe7d45495a78172c0/pillow-12.1.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:75af0b4c229ac519b155028fa1be632d812a519abba9b46b20e50c6caa184f19", size = 7029809, upload-time = "2026-01-02T09:13:26.541Z" },
]
[[package]]
@@ -925,7 +939,7 @@ wheels = [
[[package]]
name = "pytest"
-version = "9.0.1"
+version = "9.0.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
@@ -934,9 +948,9 @@ dependencies = [
{ name = "pluggy" },
{ name = "pygments" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/07/56/f013048ac4bc4c1d9be45afd4ab209ea62822fb1598f40687e6bf45dcea4/pytest-9.0.1.tar.gz", hash = "sha256:3e9c069ea73583e255c3b21cf46b8d3c56f6e3a1a8f6da94ccb0fcf57b9d73c8", size = 1564125, upload-time = "2025-11-12T13:05:09.333Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/0b/8b/6300fb80f858cda1c51ffa17075df5d846757081d11ab4aa35cef9e6258b/pytest-9.0.1-py3-none-any.whl", hash = "sha256:67be0030d194df2dfa7b556f2e56fb3c3315bd5c8822c6951162b92b32ce7dad", size = 373668, upload-time = "2025-11-12T13:05:07.379Z" },
+ { url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]
[[package]]
@@ -1128,28 +1142,28 @@ wheels = [
[[package]]
name = "ruff"
-version = "0.14.7"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/b7/5b/dd7406afa6c95e3d8fa9d652b6d6dd17dd4a6bf63cb477014e8ccd3dcd46/ruff-0.14.7.tar.gz", hash = "sha256:3417deb75d23bd14a722b57b0a1435561db65f0ad97435b4cf9f85ffcef34ae5", size = 5727324, upload-time = "2025-11-28T20:55:10.525Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/8c/b1/7ea5647aaf90106f6d102230e5df874613da43d1089864da1553b899ba5e/ruff-0.14.7-py3-none-linux_armv6l.whl", hash = "sha256:b9d5cb5a176c7236892ad7224bc1e63902e4842c460a0b5210701b13e3de4fca", size = 13414475, upload-time = "2025-11-28T20:54:54.569Z" },
- { url = "https://files.pythonhosted.org/packages/af/19/fddb4cd532299db9cdaf0efdc20f5c573ce9952a11cb532d3b859d6d9871/ruff-0.14.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:3f64fe375aefaf36ca7d7250292141e39b4cea8250427482ae779a2aa5d90015", size = 13634613, upload-time = "2025-11-28T20:55:17.54Z" },
- { url = "https://files.pythonhosted.org/packages/40/2b/469a66e821d4f3de0440676ed3e04b8e2a1dc7575cf6fa3ba6d55e3c8557/ruff-0.14.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:93e83bd3a9e1a3bda64cb771c0d47cda0e0d148165013ae2d3554d718632d554", size = 12765458, upload-time = "2025-11-28T20:55:26.128Z" },
- { url = "https://files.pythonhosted.org/packages/f1/05/0b001f734fe550bcfde4ce845948ac620ff908ab7241a39a1b39bb3c5f49/ruff-0.14.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3838948e3facc59a6070795de2ae16e5786861850f78d5914a03f12659e88f94", size = 13236412, upload-time = "2025-11-28T20:55:28.602Z" },
- { url = "https://files.pythonhosted.org/packages/11/36/8ed15d243f011b4e5da75cd56d6131c6766f55334d14ba31cce5461f28aa/ruff-0.14.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24c8487194d38b6d71cd0fd17a5b6715cda29f59baca1defe1e3a03240f851d1", size = 13182949, upload-time = "2025-11-28T20:55:33.265Z" },
- { url = "https://files.pythonhosted.org/packages/3b/cf/fcb0b5a195455729834f2a6eadfe2e4519d8ca08c74f6d2b564a4f18f553/ruff-0.14.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:79c73db6833f058a4be8ffe4a0913b6d4ad41f6324745179bd2aa09275b01d0b", size = 13816470, upload-time = "2025-11-28T20:55:08.203Z" },
- { url = "https://files.pythonhosted.org/packages/7f/5d/34a4748577ff7a5ed2f2471456740f02e86d1568a18c9faccfc73bd9ca3f/ruff-0.14.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:12eb7014fccff10fc62d15c79d8a6be4d0c2d60fe3f8e4d169a0d2def75f5dad", size = 15289621, upload-time = "2025-11-28T20:55:30.837Z" },
- { url = "https://files.pythonhosted.org/packages/53/53/0a9385f047a858ba133d96f3f8e3c9c66a31cc7c4b445368ef88ebeac209/ruff-0.14.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c623bbdc902de7ff715a93fa3bb377a4e42dd696937bf95669118773dbf0c50", size = 14975817, upload-time = "2025-11-28T20:55:24.107Z" },
- { url = "https://files.pythonhosted.org/packages/a8/d7/2f1c32af54c3b46e7fadbf8006d8b9bcfbea535c316b0bd8813d6fb25e5d/ruff-0.14.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f53accc02ed2d200fa621593cdb3c1ae06aa9b2c3cae70bc96f72f0000ae97a9", size = 14284549, upload-time = "2025-11-28T20:55:06.08Z" },
- { url = "https://files.pythonhosted.org/packages/92/05/434ddd86becd64629c25fb6b4ce7637dd52a45cc4a4415a3008fe61c27b9/ruff-0.14.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:281f0e61a23fcdcffca210591f0f53aafaa15f9025b5b3f9706879aaa8683bc4", size = 14071389, upload-time = "2025-11-28T20:55:35.617Z" },
- { url = "https://files.pythonhosted.org/packages/ff/50/fdf89d4d80f7f9d4f420d26089a79b3bb1538fe44586b148451bc2ba8d9c/ruff-0.14.7-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:dbbaa5e14148965b91cb090236931182ee522a5fac9bc5575bafc5c07b9f9682", size = 14202679, upload-time = "2025-11-28T20:55:01.472Z" },
- { url = "https://files.pythonhosted.org/packages/77/54/87b34988984555425ce967f08a36df0ebd339bb5d9d0e92a47e41151eafc/ruff-0.14.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:1464b6e54880c0fe2f2d6eaefb6db15373331414eddf89d6b903767ae2458143", size = 13147677, upload-time = "2025-11-28T20:55:19.933Z" },
- { url = "https://files.pythonhosted.org/packages/67/29/f55e4d44edfe053918a16a3299e758e1c18eef216b7a7092550d7a9ec51c/ruff-0.14.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:f217ed871e4621ea6128460df57b19ce0580606c23aeab50f5de425d05226784", size = 13151392, upload-time = "2025-11-28T20:55:21.967Z" },
- { url = "https://files.pythonhosted.org/packages/36/69/47aae6dbd4f1d9b4f7085f4d9dcc84e04561ee7ad067bf52e0f9b02e3209/ruff-0.14.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6be02e849440ed3602d2eb478ff7ff07d53e3758f7948a2a598829660988619e", size = 13412230, upload-time = "2025-11-28T20:55:12.749Z" },
- { url = "https://files.pythonhosted.org/packages/b7/4b/6e96cb6ba297f2ba502a231cd732ed7c3de98b1a896671b932a5eefa3804/ruff-0.14.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:19a0f116ee5e2b468dfe80c41c84e2bbd6b74f7b719bee86c2ecde0a34563bcc", size = 14195397, upload-time = "2025-11-28T20:54:56.896Z" },
- { url = "https://files.pythonhosted.org/packages/69/82/251d5f1aa4dcad30aed491b4657cecd9fb4274214da6960ffec144c260f7/ruff-0.14.7-py3-none-win32.whl", hash = "sha256:e33052c9199b347c8937937163b9b149ef6ab2e4bb37b042e593da2e6f6cccfa", size = 13126751, upload-time = "2025-11-28T20:55:03.47Z" },
- { url = "https://files.pythonhosted.org/packages/a8/b5/d0b7d145963136b564806f6584647af45ab98946660d399ec4da79cae036/ruff-0.14.7-py3-none-win_amd64.whl", hash = "sha256:e17a20ad0d3fad47a326d773a042b924d3ac31c6ca6deb6c72e9e6b5f661a7c6", size = 14531726, upload-time = "2025-11-28T20:54:59.121Z" },
- { url = "https://files.pythonhosted.org/packages/1d/d2/1637f4360ada6a368d3265bf39f2cf737a0aaab15ab520fc005903e883f8/ruff-0.14.7-py3-none-win_arm64.whl", hash = "sha256:be4d653d3bea1b19742fcc6502354e32f65cd61ff2fbdb365803ef2c2aec6228", size = 13609215, upload-time = "2025-11-28T20:55:15.375Z" },
+version = "0.14.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/57/08/52232a877978dd8f9cf2aeddce3e611b40a63287dfca29b6b8da791f5e8d/ruff-0.14.10.tar.gz", hash = "sha256:9a2e830f075d1a42cd28420d7809ace390832a490ed0966fe373ba288e77aaf4", size = 5859763, upload-time = "2025-12-18T19:28:57.98Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/60/01/933704d69f3f05ee16ef11406b78881733c186fe14b6a46b05cfcaf6d3b2/ruff-0.14.10-py3-none-linux_armv6l.whl", hash = "sha256:7a3ce585f2ade3e1f29ec1b92df13e3da262178df8c8bdf876f48fa0e8316c49", size = 13527080, upload-time = "2025-12-18T19:29:25.642Z" },
+ { url = "https://files.pythonhosted.org/packages/df/58/a0349197a7dfa603ffb7f5b0470391efa79ddc327c1e29c4851e85b09cc5/ruff-0.14.10-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:674f9be9372907f7257c51f1d4fc902cb7cf014b9980152b802794317941f08f", size = 13797320, upload-time = "2025-12-18T19:29:02.571Z" },
+ { url = "https://files.pythonhosted.org/packages/7b/82/36be59f00a6082e38c23536df4e71cdbc6af8d7c707eade97fcad5c98235/ruff-0.14.10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:d85713d522348837ef9df8efca33ccb8bd6fcfc86a2cde3ccb4bc9d28a18003d", size = 12918434, upload-time = "2025-12-18T19:28:51.202Z" },
+ { url = "https://files.pythonhosted.org/packages/a6/00/45c62a7f7e34da92a25804f813ebe05c88aa9e0c25e5cb5a7d23dd7450e3/ruff-0.14.10-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6987ebe0501ae4f4308d7d24e2d0fe3d7a98430f5adfd0f1fead050a740a3a77", size = 13371961, upload-time = "2025-12-18T19:29:04.991Z" },
+ { url = "https://files.pythonhosted.org/packages/40/31/a5906d60f0405f7e57045a70f2d57084a93ca7425f22e1d66904769d1628/ruff-0.14.10-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:16a01dfb7b9e4eee556fbfd5392806b1b8550c9b4a9f6acd3dbe6812b193c70a", size = 13275629, upload-time = "2025-12-18T19:29:21.381Z" },
+ { url = "https://files.pythonhosted.org/packages/3e/60/61c0087df21894cf9d928dc04bcd4fb10e8b2e8dca7b1a276ba2155b2002/ruff-0.14.10-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7165d31a925b7a294465fa81be8c12a0e9b60fb02bf177e79067c867e71f8b1f", size = 14029234, upload-time = "2025-12-18T19:29:00.132Z" },
+ { url = "https://files.pythonhosted.org/packages/44/84/77d911bee3b92348b6e5dab5a0c898d87084ea03ac5dc708f46d88407def/ruff-0.14.10-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:c561695675b972effb0c0a45db233f2c816ff3da8dcfbe7dfc7eed625f218935", size = 15449890, upload-time = "2025-12-18T19:28:53.573Z" },
+ { url = "https://files.pythonhosted.org/packages/e9/36/480206eaefa24a7ec321582dda580443a8f0671fdbf6b1c80e9c3e93a16a/ruff-0.14.10-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb98fcbbc61725968893682fd4df8966a34611239c9fd07a1f6a07e7103d08e", size = 15123172, upload-time = "2025-12-18T19:29:23.453Z" },
+ { url = "https://files.pythonhosted.org/packages/5c/38/68e414156015ba80cef5473d57919d27dfb62ec804b96180bafdeaf0e090/ruff-0.14.10-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f24b47993a9d8cb858429e97bdf8544c78029f09b520af615c1d261bf827001d", size = 14460260, upload-time = "2025-12-18T19:29:27.808Z" },
+ { url = "https://files.pythonhosted.org/packages/b3/19/9e050c0dca8aba824d67cc0db69fb459c28d8cd3f6855b1405b3f29cc91d/ruff-0.14.10-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59aabd2e2c4fd614d2862e7939c34a532c04f1084476d6833dddef4afab87e9f", size = 14229978, upload-time = "2025-12-18T19:29:11.32Z" },
+ { url = "https://files.pythonhosted.org/packages/51/eb/e8dd1dd6e05b9e695aa9dd420f4577debdd0f87a5ff2fedda33c09e9be8c/ruff-0.14.10-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:213db2b2e44be8625002dbea33bb9c60c66ea2c07c084a00d55732689d697a7f", size = 14338036, upload-time = "2025-12-18T19:29:09.184Z" },
+ { url = "https://files.pythonhosted.org/packages/6a/12/f3e3a505db7c19303b70af370d137795fcfec136d670d5de5391e295c134/ruff-0.14.10-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b914c40ab64865a17a9a5b67911d14df72346a634527240039eb3bd650e5979d", size = 13264051, upload-time = "2025-12-18T19:29:13.431Z" },
+ { url = "https://files.pythonhosted.org/packages/08/64/8c3a47eaccfef8ac20e0484e68e0772013eb85802f8a9f7603ca751eb166/ruff-0.14.10-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1484983559f026788e3a5c07c81ef7d1e97c1c78ed03041a18f75df104c45405", size = 13283998, upload-time = "2025-12-18T19:29:06.994Z" },
+ { url = "https://files.pythonhosted.org/packages/12/84/534a5506f4074e5cc0529e5cd96cfc01bb480e460c7edf5af70d2bcae55e/ruff-0.14.10-py3-none-musllinux_1_2_i686.whl", hash = "sha256:c70427132db492d25f982fffc8d6c7535cc2fd2c83fc8888f05caaa248521e60", size = 13601891, upload-time = "2025-12-18T19:28:55.811Z" },
+ { url = "https://files.pythonhosted.org/packages/0d/1e/14c916087d8598917dbad9b2921d340f7884824ad6e9c55de948a93b106d/ruff-0.14.10-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5bcf45b681e9f1ee6445d317ce1fa9d6cba9a6049542d1c3d5b5958986be8830", size = 14336660, upload-time = "2025-12-18T19:29:16.531Z" },
+ { url = "https://files.pythonhosted.org/packages/f2/1c/d7b67ab43f30013b47c12b42d1acd354c195351a3f7a1d67f59e54227ede/ruff-0.14.10-py3-none-win32.whl", hash = "sha256:104c49fc7ab73f3f3a758039adea978869a918f31b73280db175b43a2d9b51d6", size = 13196187, upload-time = "2025-12-18T19:29:19.006Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/9c/896c862e13886fae2af961bef3e6312db9ebc6adc2b156fe95e615dee8c1/ruff-0.14.10-py3-none-win_amd64.whl", hash = "sha256:466297bd73638c6bdf06485683e812db1c00c7ac96d4ddd0294a338c62fdc154", size = 14661283, upload-time = "2025-12-18T19:29:30.16Z" },
+ { url = "https://files.pythonhosted.org/packages/74/31/b0e29d572670dca3674eeee78e418f20bdf97fa8aa9ea71380885e175ca0/ruff-0.14.10-py3-none-win_arm64.whl", hash = "sha256:e51d046cf6dda98a4633b8a8a771451107413b0f07183b2bef03f075599e44e6", size = 13729839, upload-time = "2025-12-18T19:28:48.636Z" },
]
[[package]]
@@ -1174,6 +1188,62 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5d/e6/ec8471c8072382cb91233ba7267fd931219753bb43814cbc71757bfd4dab/safetensors-0.7.0-cp38-abi3-win_amd64.whl", hash = "sha256:d1239932053f56f3456f32eb9625590cc7582e905021f94636202a864d470755", size = 341380, upload-time = "2025-11-19T15:18:44.427Z" },
]
+[[package]]
+name = "sentencepiece"
+version = "0.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/15/15/2e7a025fc62d764b151ae6d0f2a92f8081755ebe8d4a64099accc6f77ba6/sentencepiece-0.2.1.tar.gz", hash = "sha256:8138cec27c2f2282f4a34d9a016e3374cd40e5c6e9cb335063db66a0a3b71fad", size = 3228515, upload-time = "2025-08-12T07:00:51.718Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/d8/15/46afbab00733d81788b64be430ca1b93011bb9388527958e26cc31832de5/sentencepiece-0.2.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6356d0986b8b8dc351b943150fcd81a1c6e6e4d439772e8584c64230e58ca987", size = 1942560, upload-time = "2025-08-12T06:59:25.82Z" },
+ { url = "https://files.pythonhosted.org/packages/fa/79/7c01b8ef98a0567e9d84a4e7a910f8e7074fcbf398a5cd76f93f4b9316f9/sentencepiece-0.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8f8ba89a3acb3dc1ae90f65ec1894b0b9596fdb98ab003ff38e058f898b39bc7", size = 1325385, upload-time = "2025-08-12T06:59:27.722Z" },
+ { url = "https://files.pythonhosted.org/packages/bb/88/2b41e07bd24f33dcf2f18ec3b74247aa4af3526bad8907b8727ea3caba03/sentencepiece-0.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:02593eca45440ef39247cee8c47322a34bdcc1d8ae83ad28ba5a899a2cf8d79a", size = 1253319, upload-time = "2025-08-12T06:59:29.306Z" },
+ { url = "https://files.pythonhosted.org/packages/a0/54/38a1af0c6210a3c6f95aa46d23d6640636d020fba7135cd0d9a84ada05a7/sentencepiece-0.2.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a0d15781a171d188b661ae4bde1d998c303f6bd8621498c50c671bd45a4798e", size = 1316162, upload-time = "2025-08-12T06:59:30.914Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/66/fb191403ade791ad2c3c1e72fe8413e63781b08cfa3aa4c9dfc536d6e795/sentencepiece-0.2.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4f5a3e0d9f445ed9d66c0fec47d4b23d12cfc858b407a03c194c1b26c2ac2a63", size = 1387785, upload-time = "2025-08-12T06:59:32.491Z" },
+ { url = "https://files.pythonhosted.org/packages/a9/2d/3bd9b08e70067b2124518b308db6a84a4f8901cc8a4317e2e4288cdd9b4d/sentencepiece-0.2.1-cp311-cp311-win32.whl", hash = "sha256:6d297a1748d429ba8534eebe5535448d78b8acc32d00a29b49acf28102eeb094", size = 999555, upload-time = "2025-08-12T06:59:34.475Z" },
+ { url = "https://files.pythonhosted.org/packages/32/b8/f709977f5fda195ae1ea24f24e7c581163b6f142b1005bc3d0bbfe4d7082/sentencepiece-0.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:82d9ead6591015f009cb1be1cb1c015d5e6f04046dbb8c9588b931e869a29728", size = 1054617, upload-time = "2025-08-12T06:59:36.461Z" },
+ { url = "https://files.pythonhosted.org/packages/7a/40/a1fc23be23067da0f703709797b464e8a30a1c78cc8a687120cd58d4d509/sentencepiece-0.2.1-cp311-cp311-win_arm64.whl", hash = "sha256:39f8651bd10974eafb9834ce30d9bcf5b73e1fc798a7f7d2528f9820ca86e119", size = 1033877, upload-time = "2025-08-12T06:59:38.391Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/be/32ce495aa1d0e0c323dcb1ba87096037358edee539cac5baf8755a6bd396/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:57cae326c8727de58c85977b175af132a7138d84c764635d7e71bbee7e774133", size = 1943152, upload-time = "2025-08-12T06:59:40.048Z" },
+ { url = "https://files.pythonhosted.org/packages/88/7e/ff23008899a58678e98c6ff592bf4d368eee5a71af96d0df6b38a039dd4f/sentencepiece-0.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:56dd39a3c4d6493db3cdca7e8cc68c6b633f0d4195495cbadfcf5af8a22d05a6", size = 1325651, upload-time = "2025-08-12T06:59:41.536Z" },
+ { url = "https://files.pythonhosted.org/packages/19/84/42eb3ce4796777a1b5d3699dfd4dca85113e68b637f194a6c8d786f16a04/sentencepiece-0.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d9381351182ff9888cc80e41c632e7e274b106f450de33d67a9e8f6043da6f76", size = 1253645, upload-time = "2025-08-12T06:59:42.903Z" },
+ { url = "https://files.pythonhosted.org/packages/89/fa/d3d5ebcba3cb9e6d3775a096251860c41a6bc53a1b9461151df83fe93255/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99f955df238021bf11f0fc37cdb54fd5e5b5f7fd30ecc3d93fb48b6815437167", size = 1316273, upload-time = "2025-08-12T06:59:44.476Z" },
+ { url = "https://files.pythonhosted.org/packages/04/88/14f2f4a2b922d8b39be45bf63d79e6cd3a9b2f248b2fcb98a69b12af12f5/sentencepiece-0.2.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0cdfecef430d985f1c2bcbfff3defd1d95dae876fbd0173376012d2d7d24044b", size = 1387881, upload-time = "2025-08-12T06:59:46.09Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/b8/903e5ccb77b4ef140605d5d71b4f9e0ad95d456d6184688073ed11712809/sentencepiece-0.2.1-cp312-cp312-win32.whl", hash = "sha256:a483fd29a34c3e34c39ac5556b0a90942bec253d260235729e50976f5dba1068", size = 999540, upload-time = "2025-08-12T06:59:48.023Z" },
+ { url = "https://files.pythonhosted.org/packages/2d/81/92df5673c067148c2545b1bfe49adfd775bcc3a169a047f5a0e6575ddaca/sentencepiece-0.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:4cdc7c36234fda305e85c32949c5211faaf8dd886096c7cea289ddc12a2d02de", size = 1054671, upload-time = "2025-08-12T06:59:49.895Z" },
+ { url = "https://files.pythonhosted.org/packages/fe/02/c5e3bc518655d714622bec87d83db9cdba1cd0619a4a04e2109751c4f47f/sentencepiece-0.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:daeb5e9e9fcad012324807856113708614d534f596d5008638eb9b40112cd9e4", size = 1033923, upload-time = "2025-08-12T06:59:51.952Z" },
+ { url = "https://files.pythonhosted.org/packages/ba/4a/85fbe1706d4d04a7e826b53f327c4b80f849cf1c7b7c5e31a20a97d8f28b/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dcd8161eee7b41aae57ded06272905dbd680a0a04b91edd0f64790c796b2f706", size = 1943150, upload-time = "2025-08-12T06:59:53.588Z" },
+ { url = "https://files.pythonhosted.org/packages/c2/83/4cfb393e287509fc2155480b9d184706ef8d9fa8cbf5505d02a5792bf220/sentencepiece-0.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c6c8f42949f419ff8c7e9960dbadcfbc982d7b5efc2f6748210d3dd53a7de062", size = 1325651, upload-time = "2025-08-12T06:59:55.073Z" },
+ { url = "https://files.pythonhosted.org/packages/8d/de/5a007fb53b1ab0aafc69d11a5a3dd72a289d5a3e78dcf2c3a3d9b14ffe93/sentencepiece-0.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:097f3394e99456e9e4efba1737c3749d7e23563dd1588ce71a3d007f25475fff", size = 1253641, upload-time = "2025-08-12T06:59:56.562Z" },
+ { url = "https://files.pythonhosted.org/packages/2c/d2/f552be5928105588f4f4d66ee37dd4c61460d8097e62d0e2e0eec41bc61d/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d7b670879c370d350557edabadbad1f6561a9e6968126e6debca4029e5547820", size = 1316271, upload-time = "2025-08-12T06:59:58.109Z" },
+ { url = "https://files.pythonhosted.org/packages/96/df/0cfe748ace5485be740fed9476dee7877f109da32ed0d280312c94ec259f/sentencepiece-0.2.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c7f0fd2f2693309e6628aeeb2e2faf6edd221134dfccac3308ca0de01f8dab47", size = 1387882, upload-time = "2025-08-12T07:00:00.701Z" },
+ { url = "https://files.pythonhosted.org/packages/ac/dd/f7774d42a881ced8e1739f393ab1e82ece39fc9abd4779e28050c2e975b5/sentencepiece-0.2.1-cp313-cp313-win32.whl", hash = "sha256:92b3816aa2339355fda2c8c4e021a5de92180b00aaccaf5e2808972e77a4b22f", size = 999541, upload-time = "2025-08-12T07:00:02.709Z" },
+ { url = "https://files.pythonhosted.org/packages/dd/e9/932b9eae6fd7019548321eee1ab8d5e3b3d1294df9d9a0c9ac517c7b636d/sentencepiece-0.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:10ed3dab2044c47f7a2e7b4969b0c430420cdd45735d78c8f853191fa0e3148b", size = 1054669, upload-time = "2025-08-12T07:00:04.915Z" },
+ { url = "https://files.pythonhosted.org/packages/c9/3a/76488a00ea7d6931689cda28726a1447d66bf1a4837943489314593d5596/sentencepiece-0.2.1-cp313-cp313-win_arm64.whl", hash = "sha256:ac650534e2251083c5f75dde4ff28896ce7c8904133dc8fef42780f4d5588fcd", size = 1033922, upload-time = "2025-08-12T07:00:06.496Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/b6/08fe2ce819e02ccb0296f4843e3f195764ce9829cbda61b7513f29b95718/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:8dd4b477a7b069648d19363aad0cab9bad2f4e83b2d179be668efa672500dc94", size = 1946052, upload-time = "2025-08-12T07:00:08.136Z" },
+ { url = "https://files.pythonhosted.org/packages/ab/d9/1ea0e740591ff4c6fc2b6eb1d7510d02f3fb885093f19b2f3abd1363b402/sentencepiece-0.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0c0f672da370cc490e4c59d89e12289778310a0e71d176c541e4834759e1ae07", size = 1327408, upload-time = "2025-08-12T07:00:09.572Z" },
+ { url = "https://files.pythonhosted.org/packages/99/7e/1fb26e8a21613f6200e1ab88824d5d203714162cf2883248b517deb500b7/sentencepiece-0.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad8493bea8432dae8d6830365352350f3b4144415a1d09c4c8cb8d30cf3b6c3c", size = 1254857, upload-time = "2025-08-12T07:00:11.021Z" },
+ { url = "https://files.pythonhosted.org/packages/bc/85/c72fd1f3c7a6010544d6ae07f8ddb38b5e2a7e33bd4318f87266c0bbafbf/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b81a24733726e3678d2db63619acc5a8dccd074f7aa7a54ecd5ca33ca6d2d596", size = 1315722, upload-time = "2025-08-12T07:00:12.989Z" },
+ { url = "https://files.pythonhosted.org/packages/4a/e8/661e5bd82a8aa641fd6c1020bd0e890ef73230a2b7215ddf9c8cd8e941c2/sentencepiece-0.2.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0a81799d0a68d618e89063fb423c3001a034c893069135ffe51fee439ae474d6", size = 1387452, upload-time = "2025-08-12T07:00:15.088Z" },
+ { url = "https://files.pythonhosted.org/packages/99/5e/ae66c361023a470afcbc1fbb8da722c72ea678a2fcd9a18f1a12598c7501/sentencepiece-0.2.1-cp313-cp313t-win32.whl", hash = "sha256:89a3ea015517c42c0341d0d962f3e6aaf2cf10d71b1932d475c44ba48d00aa2b", size = 1002501, upload-time = "2025-08-12T07:00:16.966Z" },
+ { url = "https://files.pythonhosted.org/packages/c1/03/d332828c4ff764e16c1b56c2c8f9a33488bbe796b53fb6b9c4205ddbf167/sentencepiece-0.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:33f068c9382dc2e7c228eedfd8163b52baa86bb92f50d0488bf2b7da7032e484", size = 1057555, upload-time = "2025-08-12T07:00:18.573Z" },
+ { url = "https://files.pythonhosted.org/packages/88/14/5aee0bf0864df9bd82bd59e7711362908e4935e3f9cdc1f57246b5d5c9b9/sentencepiece-0.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:b3616ad246f360e52c85781e47682d31abfb6554c779e42b65333d4b5f44ecc0", size = 1036042, upload-time = "2025-08-12T07:00:20.209Z" },
+ { url = "https://files.pythonhosted.org/packages/24/9c/89eb8b2052f720a612478baf11c8227dcf1dc28cd4ea4c0c19506b5af2a2/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:5d0350b686c320068702116276cfb26c066dc7e65cfef173980b11bb4d606719", size = 1943147, upload-time = "2025-08-12T07:00:21.809Z" },
+ { url = "https://files.pythonhosted.org/packages/82/0b/a1432bc87f97c2ace36386ca23e8bd3b91fb40581b5e6148d24b24186419/sentencepiece-0.2.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:c7f54a31cde6fa5cb030370566f68152a742f433f8d2be458463d06c208aef33", size = 1325624, upload-time = "2025-08-12T07:00:23.289Z" },
+ { url = "https://files.pythonhosted.org/packages/ea/99/bbe054ebb5a5039457c590e0a4156ed073fb0fe9ce4f7523404dd5b37463/sentencepiece-0.2.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c83b85ab2d6576607f31df77ff86f28182be4a8de6d175d2c33ca609925f5da1", size = 1253670, upload-time = "2025-08-12T07:00:24.69Z" },
+ { url = "https://files.pythonhosted.org/packages/19/ad/d5c7075f701bd97971d7c2ac2904f227566f51ef0838dfbdfdccb58cd212/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1855f57db07b51fb51ed6c9c452f570624d2b169b36f0f79ef71a6e6c618cd8b", size = 1316247, upload-time = "2025-08-12T07:00:26.435Z" },
+ { url = "https://files.pythonhosted.org/packages/fb/03/35fbe5f3d9a7435eebd0b473e09584bd3cc354ce118b960445b060d33781/sentencepiece-0.2.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01e6912125cb45d3792f530a4d38f8e21bf884d6b4d4ade1b2de5cf7a8d2a52b", size = 1387894, upload-time = "2025-08-12T07:00:28.339Z" },
+ { url = "https://files.pythonhosted.org/packages/dc/aa/956ef729aafb6c8f9c443104c9636489093bb5c61d6b90fc27aa1a865574/sentencepiece-0.2.1-cp314-cp314-win32.whl", hash = "sha256:c415c9de1447e0a74ae3fdb2e52f967cb544113a3a5ce3a194df185cbc1f962f", size = 1096698, upload-time = "2025-08-12T07:00:29.764Z" },
+ { url = "https://files.pythonhosted.org/packages/b8/cb/fe400d8836952cc535c81a0ce47dc6875160e5fedb71d2d9ff0e9894c2a6/sentencepiece-0.2.1-cp314-cp314-win_amd64.whl", hash = "sha256:881b2e44b14fc19feade3cbed314be37de639fc415375cefaa5bc81a4be137fd", size = 1155115, upload-time = "2025-08-12T07:00:32.865Z" },
+ { url = "https://files.pythonhosted.org/packages/32/89/047921cf70f36c7b6b6390876b2399b3633ab73b8d0cb857e5a964238941/sentencepiece-0.2.1-cp314-cp314-win_arm64.whl", hash = "sha256:2005242a16d2dc3ac5fe18aa7667549134d37854823df4c4db244752453b78a8", size = 1133890, upload-time = "2025-08-12T07:00:34.763Z" },
+ { url = "https://files.pythonhosted.org/packages/a1/11/5b414b9fae6255b5fb1e22e2ed3dc3a72d3a694e5703910e640ac78346bb/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:a19adcec27c524cb7069a1c741060add95f942d1cbf7ad0d104dffa0a7d28a2b", size = 1946081, upload-time = "2025-08-12T07:00:36.97Z" },
+ { url = "https://files.pythonhosted.org/packages/77/eb/7a5682bb25824db8545f8e5662e7f3e32d72a508fdce086029d89695106b/sentencepiece-0.2.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e37e4b4c4a11662b5db521def4e44d4d30ae69a1743241412a93ae40fdcab4bb", size = 1327406, upload-time = "2025-08-12T07:00:38.669Z" },
+ { url = "https://files.pythonhosted.org/packages/03/b0/811dae8fb9f2784e138785d481469788f2e0d0c109c5737372454415f55f/sentencepiece-0.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:477c81505db072b3ab627e7eab972ea1025331bd3a92bacbf798df2b75ea86ec", size = 1254846, upload-time = "2025-08-12T07:00:40.611Z" },
+ { url = "https://files.pythonhosted.org/packages/ef/23/195b2e7ec85ebb6a547969f60b723c7aca5a75800ece6cc3f41da872d14e/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:010f025a544ef770bb395091d57cb94deb9652d8972e0d09f71d85d5a0816c8c", size = 1315721, upload-time = "2025-08-12T07:00:42.914Z" },
+ { url = "https://files.pythonhosted.org/packages/7e/aa/553dbe4178b5f23eb28e59393dddd64186178b56b81d9b8d5c3ff1c28395/sentencepiece-0.2.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:733e59ff1794d26db706cd41fc2d7ca5f6c64a820709cb801dc0ea31780d64ab", size = 1387458, upload-time = "2025-08-12T07:00:44.56Z" },
+ { url = "https://files.pythonhosted.org/packages/66/7c/08ff0012507297a4dd74a5420fdc0eb9e3e80f4e88cab1538d7f28db303d/sentencepiece-0.2.1-cp314-cp314t-win32.whl", hash = "sha256:d3233770f78e637dc8b1fda2cd7c3b99ec77e7505041934188a4e7fe751de3b0", size = 1099765, upload-time = "2025-08-12T07:00:46.058Z" },
+ { url = "https://files.pythonhosted.org/packages/91/d5/2a69e1ce15881beb9ddfc7e3f998322f5cedcd5e4d244cb74dade9441663/sentencepiece-0.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:5e4366c97b68218fd30ea72d70c525e6e78a6c0a88650f57ac4c43c63b234a9d", size = 1157807, upload-time = "2025-08-12T07:00:47.673Z" },
+ { url = "https://files.pythonhosted.org/packages/f3/16/54f611fcfc2d1c46cbe3ec4169780b2cfa7cf63708ef2b71611136db7513/sentencepiece-0.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:105e36e75cbac1292642045458e8da677b2342dcd33df503e640f0b457cb6751", size = 1136264, upload-time = "2025-08-12T07:00:49.485Z" },
+]
+
[[package]]
name = "setuptools"
version = "80.9.0"
@@ -1206,27 +1276,28 @@ wheels = [
[[package]]
name = "tokenizers"
-version = "0.22.1"
+version = "0.22.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "huggingface-hub" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/1c/46/fb6854cec3278fbfa4a75b50232c77622bc517ac886156e6afbfa4d8fc6e/tokenizers-0.22.1.tar.gz", hash = "sha256:61de6522785310a309b3407bac22d99c4db5dba349935e99e4d15ea2226af2d9", size = 363123, upload-time = "2025-09-19T09:49:23.424Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/73/6f/f80cfef4a312e1fb34baf7d85c72d4411afde10978d4657f8cdd811d3ccc/tokenizers-0.22.2.tar.gz", hash = "sha256:473b83b915e547aa366d1eee11806deaf419e17be16310ac0a14077f1e28f917", size = 372115, upload-time = "2026-01-05T10:45:15.988Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/bf/33/f4b2d94ada7ab297328fc671fed209368ddb82f965ec2224eb1892674c3a/tokenizers-0.22.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59fdb013df17455e5f950b4b834a7b3ee2e0271e6378ccb33aa74d178b513c73", size = 3069318, upload-time = "2025-09-19T09:49:11.848Z" },
- { url = "https://files.pythonhosted.org/packages/1c/58/2aa8c874d02b974990e89ff95826a4852a8b2a273c7d1b4411cdd45a4565/tokenizers-0.22.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d4e484f7b0827021ac5f9f71d4794aaef62b979ab7608593da22b1d2e3c4edc", size = 2926478, upload-time = "2025-09-19T09:49:09.759Z" },
- { url = "https://files.pythonhosted.org/packages/1e/3b/55e64befa1e7bfea963cf4b787b2cea1011362c4193f5477047532ce127e/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19d2962dd28bc67c1f205ab180578a78eef89ac60ca7ef7cbe9635a46a56422a", size = 3256994, upload-time = "2025-09-19T09:48:56.701Z" },
- { url = "https://files.pythonhosted.org/packages/71/0b/fbfecf42f67d9b7b80fde4aabb2b3110a97fac6585c9470b5bff103a80cb/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:38201f15cdb1f8a6843e6563e6e79f4abd053394992b9bbdf5213ea3469b4ae7", size = 3153141, upload-time = "2025-09-19T09:48:59.749Z" },
- { url = "https://files.pythonhosted.org/packages/17/a9/b38f4e74e0817af8f8ef925507c63c6ae8171e3c4cb2d5d4624bf58fca69/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1cbe5454c9a15df1b3443c726063d930c16f047a3cc724b9e6e1a91140e5a21", size = 3508049, upload-time = "2025-09-19T09:49:05.868Z" },
- { url = "https://files.pythonhosted.org/packages/d2/48/dd2b3dac46bb9134a88e35d72e1aa4869579eacc1a27238f1577270773ff/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7d094ae6312d69cc2a872b54b91b309f4f6fbce871ef28eb27b52a98e4d0214", size = 3710730, upload-time = "2025-09-19T09:49:01.832Z" },
- { url = "https://files.pythonhosted.org/packages/93/0e/ccabc8d16ae4ba84a55d41345207c1e2ea88784651a5a487547d80851398/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:afd7594a56656ace95cdd6df4cca2e4059d294c5cfb1679c57824b605556cb2f", size = 3412560, upload-time = "2025-09-19T09:49:03.867Z" },
- { url = "https://files.pythonhosted.org/packages/d0/c6/dc3a0db5a6766416c32c034286d7c2d406da1f498e4de04ab1b8959edd00/tokenizers-0.22.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2ef6063d7a84994129732b47e7915e8710f27f99f3a3260b8a38fc7ccd083f4", size = 3250221, upload-time = "2025-09-19T09:49:07.664Z" },
- { url = "https://files.pythonhosted.org/packages/d7/a6/2c8486eef79671601ff57b093889a345dd3d576713ef047776015dc66de7/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ba0a64f450b9ef412c98f6bcd2a50c6df6e2443b560024a09fa6a03189726879", size = 9345569, upload-time = "2025-09-19T09:49:14.214Z" },
- { url = "https://files.pythonhosted.org/packages/6b/16/32ce667f14c35537f5f605fe9bea3e415ea1b0a646389d2295ec348d5657/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:331d6d149fa9c7d632cde4490fb8bbb12337fa3a0232e77892be656464f4b446", size = 9271599, upload-time = "2025-09-19T09:49:16.639Z" },
- { url = "https://files.pythonhosted.org/packages/51/7c/a5f7898a3f6baa3fc2685c705e04c98c1094c523051c805cdd9306b8f87e/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:607989f2ea68a46cb1dfbaf3e3aabdf3f21d8748312dbeb6263d1b3b66c5010a", size = 9533862, upload-time = "2025-09-19T09:49:19.146Z" },
- { url = "https://files.pythonhosted.org/packages/36/65/7e75caea90bc73c1dd8d40438adf1a7bc26af3b8d0a6705ea190462506e1/tokenizers-0.22.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a0f307d490295717726598ef6fa4f24af9d484809223bbc253b201c740a06390", size = 9681250, upload-time = "2025-09-19T09:49:21.501Z" },
- { url = "https://files.pythonhosted.org/packages/30/2c/959dddef581b46e6209da82df3b78471e96260e2bc463f89d23b1bf0e52a/tokenizers-0.22.1-cp39-abi3-win32.whl", hash = "sha256:b5120eed1442765cd90b903bb6cfef781fd8fe64e34ccaecbae4c619b7b12a82", size = 2472003, upload-time = "2025-09-19T09:49:27.089Z" },
- { url = "https://files.pythonhosted.org/packages/b3/46/e33a8c93907b631a99377ef4c5f817ab453d0b34f93529421f42ff559671/tokenizers-0.22.1-cp39-abi3-win_amd64.whl", hash = "sha256:65fd6e3fb11ca1e78a6a93602490f134d1fdeb13bcef99389d5102ea318ed138", size = 2674684, upload-time = "2025-09-19T09:49:24.953Z" },
+ { url = "https://files.pythonhosted.org/packages/92/97/5dbfabf04c7e348e655e907ed27913e03db0923abb5dfdd120d7b25630e1/tokenizers-0.22.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:544dd704ae7238755d790de45ba8da072e9af3eea688f698b137915ae959281c", size = 3100275, upload-time = "2026-01-05T10:41:02.158Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/47/174dca0502ef88b28f1c9e06b73ce33500eedfac7a7692108aec220464e7/tokenizers-0.22.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:1e418a55456beedca4621dbab65a318981467a2b188e982a23e117f115ce5001", size = 2981472, upload-time = "2026-01-05T10:41:00.276Z" },
+ { url = "https://files.pythonhosted.org/packages/d6/84/7990e799f1309a8b87af6b948f31edaa12a3ed22d11b352eaf4f4b2e5753/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2249487018adec45d6e3554c71d46eb39fa8ea67156c640f7513eb26f318cec7", size = 3290736, upload-time = "2026-01-05T10:40:32.165Z" },
+ { url = "https://files.pythonhosted.org/packages/78/59/09d0d9ba94dcd5f4f1368d4858d24546b4bdc0231c2354aa31d6199f0399/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25b85325d0815e86e0bac263506dd114578953b7b53d7de09a6485e4a160a7dd", size = 3168835, upload-time = "2026-01-05T10:40:38.847Z" },
+ { url = "https://files.pythonhosted.org/packages/47/50/b3ebb4243e7160bda8d34b731e54dd8ab8b133e50775872e7a434e524c28/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfb88f22a209ff7b40a576d5324bf8286b519d7358663db21d6246fb17eea2d5", size = 3521673, upload-time = "2026-01-05T10:40:56.614Z" },
+ { url = "https://files.pythonhosted.org/packages/e0/fa/89f4cb9e08df770b57adb96f8cbb7e22695a4cb6c2bd5f0c4f0ebcf33b66/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c774b1276f71e1ef716e5486f21e76333464f47bece56bbd554485982a9e03e", size = 3724818, upload-time = "2026-01-05T10:40:44.507Z" },
+ { url = "https://files.pythonhosted.org/packages/64/04/ca2363f0bfbe3b3d36e95bf67e56a4c88c8e3362b658e616d1ac185d47f2/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df6c4265b289083bf710dff49bc51ef252f9d5be33a45ee2bed151114a56207b", size = 3379195, upload-time = "2026-01-05T10:40:51.139Z" },
+ { url = "https://files.pythonhosted.org/packages/2e/76/932be4b50ef6ccedf9d3c6639b056a967a86258c6d9200643f01269211ca/tokenizers-0.22.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:369cc9fc8cc10cb24143873a0d95438bb8ee257bb80c71989e3ee290e8d72c67", size = 3274982, upload-time = "2026-01-05T10:40:58.331Z" },
+ { url = "https://files.pythonhosted.org/packages/1d/28/5f9f5a4cc211b69e89420980e483831bcc29dade307955cc9dc858a40f01/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:29c30b83d8dcd061078b05ae0cb94d3c710555fbb44861139f9f83dcca3dc3e4", size = 9478245, upload-time = "2026-01-05T10:41:04.053Z" },
+ { url = "https://files.pythonhosted.org/packages/6c/fb/66e2da4704d6aadebf8cb39f1d6d1957df667ab24cff2326b77cda0dcb85/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:37ae80a28c1d3265bb1f22464c856bd23c02a05bb211e56d0c5301a435be6c1a", size = 9560069, upload-time = "2026-01-05T10:45:10.673Z" },
+ { url = "https://files.pythonhosted.org/packages/16/04/fed398b05caa87ce9b1a1bb5166645e38196081b225059a6edaff6440fac/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:791135ee325f2336f498590eb2f11dc5c295232f288e75c99a36c5dbce63088a", size = 9899263, upload-time = "2026-01-05T10:45:12.559Z" },
+ { url = "https://files.pythonhosted.org/packages/05/a1/d62dfe7376beaaf1394917e0f8e93ee5f67fea8fcf4107501db35996586b/tokenizers-0.22.2-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38337540fbbddff8e999d59970f3c6f35a82de10053206a7562f1ea02d046fa5", size = 10033429, upload-time = "2026-01-05T10:45:14.333Z" },
+ { url = "https://files.pythonhosted.org/packages/fd/18/a545c4ea42af3df6effd7d13d250ba77a0a86fb20393143bbb9a92e434d4/tokenizers-0.22.2-cp39-abi3-win32.whl", hash = "sha256:a6bf3f88c554a2b653af81f3204491c818ae2ac6fbc09e76ef4773351292bc92", size = 2502363, upload-time = "2026-01-05T10:45:20.593Z" },
+ { url = "https://files.pythonhosted.org/packages/65/71/0670843133a43d43070abeb1949abfdef12a86d490bea9cd9e18e37c5ff7/tokenizers-0.22.2-cp39-abi3-win_amd64.whl", hash = "sha256:c9ea31edff2968b44a88f97d784c2f16dc0729b8b143ed004699ebca91f05c48", size = 2747786, upload-time = "2026-01-05T10:45:18.411Z" },
+ { url = "https://files.pythonhosted.org/packages/72/f4/0de46cfa12cdcbcd464cc59fde36912af405696f687e53a091fb432f694c/tokenizers-0.22.2-cp39-abi3-win_arm64.whl", hash = "sha256:9ce725d22864a1e965217204946f830c37876eee3b2ba6fc6255e8e903d5fcbc", size = 2612133, upload-time = "2026-01-05T10:45:17.232Z" },
]
[[package]]
@@ -1336,7 +1407,7 @@ wheels = [
[[package]]
name = "transformers"
version = "5.0.0.dev0"
-source = { git = "https://github.com/huggingface/transformers#a48d68c6b06172938948eef457ebb7bcd5dcb77c" }
+source = { git = "https://github.com/huggingface/transformers#e8c51d1848187b9e58d00bf7d638811686ab2a4b" }
dependencies = [
{ name = "filelock" },
{ name = "huggingface-hub" },
@@ -1366,15 +1437,15 @@ wheels = [
[[package]]
name = "typer-slim"
-version = "0.20.0"
+version = "0.21.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/8e/45/81b94a52caed434b94da65729c03ad0fb7665fab0f7db9ee54c94e541403/typer_slim-0.20.0.tar.gz", hash = "sha256:9fc6607b3c6c20f5c33ea9590cbeb17848667c51feee27d9e314a579ab07d1a3", size = 106561, upload-time = "2025-10-20T17:03:46.642Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/f9/3b/2f60ce16f578b1db5b8816d37d6a4d9786b33b76407fc8c13b0b86312c31/typer_slim-0.21.0.tar.gz", hash = "sha256:f2dbd150cfa0fead2242e21fa9f654dfc64773763ddf07c6be9a49ad34f79557", size = 106841, upload-time = "2025-12-25T09:54:55.998Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/5e/dd/5cbf31f402f1cc0ab087c94d4669cfa55bd1e818688b910631e131d74e75/typer_slim-0.20.0-py3-none-any.whl", hash = "sha256:f42a9b7571a12b97dddf364745d29f12221865acef7a2680065f9bb29c7dc89d", size = 47087, upload-time = "2025-10-20T17:03:44.546Z" },
+ { url = "https://files.pythonhosted.org/packages/b4/84/e97abf10e4a699194ff07fd586ec7f4cf867d9d04bead559a65f9e7aff84/typer_slim-0.21.0-py3-none-any.whl", hash = "sha256:92aee2188ac6fc2b2924bd75bb61a340b78bd8cd51fd9735533ce5a856812c8e", size = 47174, upload-time = "2025-12-25T09:54:54.609Z" },
]
[[package]]
@@ -1400,11 +1471,20 @@ wheels = [
[[package]]
name = "urllib3"
-version = "2.5.0"
+version = "2.6.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/1e/24/a2a2ed9addd907787d7aa0355ba36a6cadf1768b934c652ea78acbd59dcd/urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797", size = 432930, upload-time = "2025-12-11T15:56:40.252Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/6d/b9/4095b668ea3678bf6a0af005527f39de12fb026516fb3df17495a733b7f8/urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd", size = 131182, upload-time = "2025-12-11T15:56:38.584Z" },
+]
+
+[[package]]
+name = "wcwidth"
+version = "0.2.14"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/15/22/9ee70a2574a4f4599c47dd506532914ce044817c7752a79b6a51286319bc/urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760", size = 393185, upload-time = "2025-06-18T14:07:41.644Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/24/30/6b0809f4510673dc723187aeaf24c7f5459922d01e2f794277a3dfb90345/wcwidth-0.2.14.tar.gz", hash = "sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605", size = 102293, upload-time = "2025-09-22T16:29:53.023Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/a7/c2/fe1e52489ae3122415c51f387e221dd0773709bad6c6cdaa599e8a2c5185/urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc", size = 129795, upload-time = "2025-06-18T14:07:40.39Z" },
+ { url = "https://files.pythonhosted.org/packages/af/b5/123f13c975e9f27ab9c0770f514345bd406d0e8d3b7a0723af9d43f710af/wcwidth-0.2.14-py2.py3-none-any.whl", hash = "sha256:a7bb560c8aee30f9957e5f9895805edd20602f2d7f720186dfd906e82b4982e1", size = 37286, upload-time = "2025-09-22T16:29:51.641Z" },
]
[[package]]
From 492a7dfa8cc45f891b34cd8ac537f3492dceaf7e Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sun, 11 Jan 2026 14:59:55 -0500
Subject: [PATCH 03/16] ~patched diffusers and transformers generation
---
mir/config/constants.py | 16 +-
mir/doc_parser.py | 11 +-
mir/indexers.py | 44 +-
mir/mir.json | 4634 ++++++++++++++++++++++++++++++---------
mir/spec/template.json | 1 +
mir/tag.py | 7 +-
6 files changed, 3643 insertions(+), 1070 deletions(-)
diff --git a/mir/config/constants.py b/mir/config/constants.py
index 5736e52..23632fd 100644
--- a/mir/config/constants.py
+++ b/mir/config/constants.py
@@ -9,6 +9,7 @@
from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES
from mir.config.json_io import read_json_file
+from mir.config.console import nfo
def mapped_cls(model_identifier: str):
@@ -50,9 +51,15 @@ def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable:
module = module_name.strip()
library = pkg_name_or_abs_path.strip()
- base_library = import_module(library, module)
- module = getattr(base_library, module)
- return module
+ try:
+ base_library = import_module(library, module)
+ except SyntaxError:
+ base_library = None
+ nfo(f"Syntax error attempting to import {module_name}")
+ if module := getattr(base_library, module, None):
+ return module
+ else:
+ nfo("failed to find module {module}")
def extract_init_params(module: Callable | str, package_name: str | None = None) -> dict[str, list[str]]:
@@ -137,12 +144,15 @@ class DocStringParserConstants:
">>> motion_adapter = ",
">>> adapter = ", # if this moves, also change motion_adapter check
">>> controlnet = ",
+ ">>> super_res_1_pipe = ",
">>> pipe_prior = ",
+ ">>> pipe_prior_redux = ",
">>> pipe = ",
">>> pipeline = ",
">>> blip_diffusion_pipe = ",
">>> prior_pipe = ",
">>> gen_pipe = ",
+ "pipe = ",
]
repo_variables: List[str] = [
"controlnet_model",
diff --git a/mir/doc_parser.py b/mir/doc_parser.py
index 9bf6181..505149c 100644
--- a/mir/doc_parser.py
+++ b/mir/doc_parser.py
@@ -4,7 +4,7 @@
from typing import List, Optional, Tuple
from pydantic import BaseModel, field_validator
-from mir.config.console import dbuq, nfo
+from mir.config.console import nfo
from mir.config.constants import DocParseData, DocStringParserConstants
@@ -80,6 +80,7 @@ def parse(self) -> DocParseData:
motion_adapter = "motion_adapter" in candidate or "adapter" in candidate
if motion_adapter and pipe_repo:
staged, prior_candidate, _ = self.doc_match(DocStringParserConstants.pipe_prefixes[2:]) # skip the adapter statements
+
staged_class, staged_repo = (
self._extract_class_and_repo(
segment=staged,
@@ -90,13 +91,13 @@ def parse(self) -> DocParseData:
if staged
else (None, None)
)
- if motion_adapter and pipe_class:
+ if motion_adapter and pipe_class and staged_class is not None:
pipe_class = staged_class
staged_repo = None
staged_class = None
if DocStringValidator.validate_pipe_class(pipe_class):
- dbuq(f"class :{pipe_class}, repo : {pipe_repo}, staged_class: {staged_class}, staged_repo:{staged_repo} \n")
+ # dbuq(f"class :{pipe_class}, repo : {pipe_repo}, staged_class: {staged_class}, staged_repo:{staged_repo} \n")
return DocParseData(pipe_class=pipe_class, pipe_repo=pipe_repo, staged_class=staged_class, staged_repo=staged_repo)
def _extract_class_and_repo(
@@ -110,8 +111,8 @@ def _extract_class_and_repo(
pipe_repo = None
for call_type in call_types:
if call_type in segment:
- pipe_class = segment.partition(call_type)[0].strip().split("= ")[-1]
- if prior_class == pipe_class:
+ pipe_class = segment.partition(call_type)[0].strip().split("= ")[-1].split(".")[-1]
+ if prior_class == pipe_class and prior_text.split(call_type)[-1].strip().replace(")", ""):
pipe_class = prior_text.partition(call_type)[0].strip().split("= ")[-1]
repo_segment = segment.partition(call_type)[2].partition(")")[0]
else:
diff --git a/mir/indexers.py b/mir/indexers.py
index 7d78c6a..0c155ce 100644
--- a/mir/indexers.py
+++ b/mir/indexers.py
@@ -10,7 +10,7 @@
from mir.config.console import nfo
from mir.config.constants import ClassMapEntry, extract_init_params
from mir.config.conversion import get_repo_from_class_map, import_submodules
-from mir.doc_parser import parse_docs
+from mir.doc_parser import parse_docs, DocParseData
from mir.tag import mir_prefix_from_forward_pass, mir_tag_from_config, tag_model_from_repo
if "pytest" in sys.modules:
@@ -101,44 +101,42 @@ def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
from mir.inspect.metadata import find_diffusers_docstrings
extracted_docstrings = find_diffusers_docstrings()
- model_info = [
- extract #
- for pipeline in extracted_docstrings
- for extract in pipeline
- ]
+ model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
- for extract in model_info:
- pipe = parse_docs(extract.doc_string)
- if not pipe:
- nfo(f"Doc string not found in '{extract.package_name}' in {extract.file_name}")
+ for extracted in model_info:
+ parsed_data: DocParseData = parse_docs(extracted.doc_string)
+ if parsed_data is None:
+ print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
continue
for class_name, swap_repo in special_classes.items():
- if pipe.pipe_class == class_name:
- pipe.pipe_repo = swap_repo
+ if parsed_data.pipe_class == class_name:
+ parsed_data.pipe_repo = swap_repo
break
- model_class_obj = import_submodules(pipe.pipe_class, f"diffusers.pipelines.{extract.package_name}.{extract.file_name}")
+ model_class_obj = import_submodules(parsed_data.pipe_class, f"diffusers.pipelines.{extracted.package_name}.{extracted.file_name}")
+ if not model_class_obj:
+ continue
extract_init_params(model_class_obj)
try:
- series, comp_data = create_pipe_entry(pipe.pipe_repo, pipe.pipe_class)
+ series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
except TypeError:
pass # Attempt 1
if pipe_data.get(series):
- if "img2img" in pipe.pipe_class.lower():
+ if "img2img" in parsed_data.pipe_class.lower():
continue
pipe_data.setdefault(series, {}).update(comp_data)
special_conditions = special_repos | special_classes
- if pipe.staged_class or pipe.pipe_repo in list(special_conditions):
- test = special_conditions.get(pipe.pipe_repo)
+ if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
+ test = special_conditions.get(parsed_data.pipe_repo)
if test:
staged_repo = test
- pipe.staged_class = pipe.pipe_class
+ parsed_data.staged_class = parsed_data.pipe_class
try:
series, comp_data = create_pipe_entry(
- staged_repo if pipe.staged_repo else pipe.pipe_repo,
- pipe.staged_class #
- if pipe.staged_class
- else pipe.pipe_class,
+ staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
+ parsed_data.staged_class #
+ if parsed_data.staged_class
+ else parsed_data.pipe_class,
)
except TypeError as error_log:
nfo(series, comp_data)
@@ -166,7 +164,6 @@ def transformers_index():
mir_data = {}
transformers_data: list[ClassMapEntry] = map_transformers_classes()
for entry in transformers_data:
- print(entry)
repo_path = get_repo_from_class_map(entry)
if config := missing_config_params.get(entry.name, {}):
entry.config_params = config.get("params", entry.config_params)
@@ -182,7 +179,6 @@ def transformers_index():
tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
if isinstance(tokenizer_classes, str):
tokenizer_classes = [tokenizer_classes]
- print(type(tokenizer_classes))
# mode = modalities.get("mode")
if tokenizer_classes:
index = 0
diff --git a/mir/mir.json b/mir/mir.json
index 78868b2..4c9d44d 100644
--- a/mir/mir.json
+++ b/mir/mir.json
@@ -49,6 +49,16 @@
}
}
},
+ "info.controlnet.stable-diffusion-xl-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-base-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLControlNetUnionInpaintPipeline"
+ }
+ }
+ }
+ },
"info.controlnet.controlnet-union-sdxl-1": {
"*": {
"repo": "xinsir/controlnet-union-sdxl-1.0",
@@ -99,6 +109,36 @@
}
}
},
+ "info.unet.marigold-depth-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-depth-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldDepthPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.marigold-iid-appearance-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-iid-appearance-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldIntrinsicsPipeline"
+ }
+ }
+ }
+ },
+ "info.unet.marigold-normals-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-normals-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldNormalsPipeline"
+ }
+ }
+ }
+ },
"info.unet.stable-diffusion-v1-5": {
"*": {
"repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
@@ -337,6 +377,16 @@
}
}
},
+ "info.lora.animatelcm": {
+ "*": {
+ "repo": "wangfuyun/AnimateLCM",
+ "pkg": {
+ "0": {
+ "diffusers": "MotionAdapter"
+ }
+ }
+ }
+ },
"info.lora.animatediff-motion-adapter-sdxl": {
"*": {
"repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
@@ -479,6 +529,16 @@
}
}
},
+ "info.controlnet.flux1-depth-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Depth-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlInpaintPipeline"
+ }
+ }
+ }
+ },
"info.controlnet.flux1-dev-controlnet-canny": {
"*": {
"repo": "InstantX/FLUX.1-dev-controlnet-canny",
@@ -519,119 +579,6 @@
}
}
},
- "info.dit.flux1-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxPipeline"
- }
- }
- },
- "mystic": {
- "repo": "enhanceaiteam/Mystic",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 16,
- "guidance_scale": 7.5,
- "width": 768,
- "height": 1024
- }
- }
- },
- "file_256": [
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
- ],
- "layer_256": [
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
- ],
- "layer_b3": [
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
- ]
- },
- "flux1-lite": {
- "repo": "freepik/flux.1-lite-8b",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 28
- }
- }
- },
- "file_256": [
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
- ],
- "layer_256": [
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
- ],
- "layer_b3": [
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
- ]
- },
- "f-lite": {
- "repo": "freepik/f-lite",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "f-lite-texture": {
- "repo": "freepik/f-lite-texture",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "flux": {
- "repo": "TencentARC/flux-mini",
- "file_256": [
- "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
- ],
- "layer_256": [
- "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
- ],
- "layer_b3": [
- "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
- ]
- },
- "flex2": {
- "repo": "ostris/Flex.2-preview",
- "file_256": [
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
- ],
- "layer_256": [
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
- ],
- "layer_b3": [
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
- ]
- },
- "flex1-alpha": {
- "repo": "ostris/Flex.1-alpha",
- "file_256": [
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
- ],
- "layer_256": [
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
- ],
- "layer_b3": [
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
- ]
- }
- },
"info.dit.prx-512-t2i-sft": {
"*": {
"repo": "Photoroom/prx-512-t2i-sft",
@@ -835,12 +782,12 @@
}
}
},
- "info.unet.if-i-xl-v1": {
+ "info.unet.if-ii-l-v1": {
"*": {
- "repo": "DeepFloyd/IF-I-XL-v1.0",
+ "repo": "DeepFloyd/IF-II-L-v1.0",
"pkg": {
"0": {
- "diffusers": "IFPipeline"
+ "diffusers": "IFSuperResolutionPipeline"
}
}
}
@@ -970,7 +917,7 @@
"repo": "kandinsky-community/kandinsky-2-2-prior",
"pkg": {
"0": {
- "diffusers": "KandinskyPriorPipeline"
+ "diffusers": "KandinskyV22PriorPipeline"
}
}
}
@@ -1200,16 +1147,6 @@
}
}
},
- "info.unet.flux1-dev": {
- "decoder": {
- "repo": "black-forest-labs/FLUX.1-dev",
- "pkg": {
- "0": {
- "diffusers": "WuerstchenDecoderPipeline"
- }
- }
- }
- },
"info.dit.auraflow": {
"*": {
"repo": "fal/AuraFlow",
@@ -1543,1149 +1480,3669 @@
}
},
"info.encoder.tokenizer": {
- "funnel": {
+ "aimv2-patch14-224-lit": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
}
}
},
- "nllb-moe": {
+ "albert-xx-v2": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
}
}
},
- "deberta-v2-x": {
+ "align": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "xlm-roberta": {
+ "aria": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "gpt2": {
+ "audio-flamingo-3-hf": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
}
}
},
- "megatron-bert-uncased": {
+ "aya-vision": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
}
}
},
- "blenderbot": {
+ "bark": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "20": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "21": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "22": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "23": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "omdet-turbo-swin-hf": {
+ "bart": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
}
}
},
- "mgp-str": {
+ "bert-uncased": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "bert-for-seq-generation-l-24-bbc-encoder": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert_generation.tokenization_bert_generation.BertGenerationTokenizer"
+ }
+ }
+ },
+ "bigbird-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.big_bird.tokenization_big_bird.BigBirdTokenizer"
+ }
+ }
+ },
+ "bigbird-pegasus-arxiv": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "biogpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.biogpt.tokenization_biogpt.BioGptTokenizer"
+ }
+ }
+ },
+ "blenderbot": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer"
+ }
+ }
+ },
+ "blip-vqa": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
"blip2-opt": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
}
}
},
- "efficient-mlm-m0-0": {
+ "bloom": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
}
}
},
- "wav2vec2-conformer-rel-pos": {
+ "bridgetower": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
}
}
},
- "wav2vec2-bert-rel-pos": {
+ "bros-uncased": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "mm-grounding-dino-o365v1-goldg-v3det": {
+ "camembert": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.camembert.tokenization_camembert.CamembertTokenizer"
+ }
+ }
+ },
+ "canine-s": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.canine.tokenization_canine.CanineTokenizer"
+ }
+ }
+ },
+ "chinese-clip-vit-patch16": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "clap-htsat-fused": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "clip-vit-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "clipseg-rd64": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "clvp-dev": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clvp.tokenization_clvp.ClvpTokenizer"
+ }
+ }
+ },
+ "codegen-mono": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "conv-bert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "ctrl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer"
+ }
+ }
+ },
+ "data2vec-audio-960h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "data2vec-text": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "deberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.deberta.tokenization_deberta.DebertaTokenizer"
+ }
+ }
+ },
+ "deberta-v2-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2.DebertaV2Tokenizer"
+ }
+ }
+ },
+ "distilbert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "dpr-ctx-encoder-single-nq": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
+ }
+ }
+ },
+ "electra-discriminator": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "ernie-3-zh": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "ernie-4-vl-a-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "esm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
+ }
+ }
+ },
+ "falcon-mamba": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "flaubert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
+ }
+ }
+ },
+ "florence-2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "fnet": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
+ }
+ }
+ },
+ "wmt19-en-ru": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
+ }
+ }
+ },
+ "funnel": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.funnel.tokenization_funnel.FunnelTokenizer"
+ }
+ }
+ },
+ "git": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "glm-4v-thinking": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4v": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-asr-nano-2512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "gpt2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-bigcode-santacoder": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-neo": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-j": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "grounding-dino": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "groupvit-gcc-yfcc": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "hubert-ls960": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "ibert-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "idefics": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "idefics2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "idefics3-llama3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "blip-flan-t5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "internvl3-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "jamba-v0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "janus": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "kosmos-2-patch14-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "kosmos-2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "todo": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.parakeet.tokenization_parakeet_fast.ParakeetTokenizerFast"
+ }
+ }
+ },
+ "layoutlm-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "layoutlmv2-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer"
+ }
+ }
+ },
+ "layoutlmv3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
+ }
+ }
+ },
+ "led-16384": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "lfm2-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "lilt-roberta-en": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "llama-4-scout-16e": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-v1-mistral-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-next-video-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-onevision-qwen2-ov-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "long-t5-local": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "luke": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.luke.tokenization_luke.LukeTokenizer"
+ }
+ }
+ },
+ "lxmert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "m": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.m2m_100.tokenization_m2m_100.M2M100Tokenizer"
+ }
+ }
+ },
+ "mamba": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "mamba2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "opus-mt-en-de": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.marian.tokenization_marian.MarianTokenizer"
+ }
+ }
+ },
+ "mbart-cc25": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mbart.tokenization_mbart.MBartTokenizer"
+ }
+ }
+ },
+ "megatron-bert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "metaclip-2-worldwide-huge-quickgelu": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "mgp-str": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mgp_str.tokenization_mgp_str.MgpstrTokenizer"
+ }
+ }
+ },
+ "mistral-3-2503": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llama-3-vision": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mm-grounding-dino-o365v1-goldg-v3det": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "mobilebert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "modernbert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "mpnet": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer"
+ }
+ }
+ },
+ "mpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "mra-512-4": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "mt5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "musicgen": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "musicgen-melody": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "mvp": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "nllb-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.nllb.tokenization_nllb.NllbTokenizer"
+ }
+ }
+ },
+ "nystromformer-512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
+ }
+ }
+ },
+ "omdet-turbo-swin-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "openai-gpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
+ }
+ }
+ },
+ "opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "ovis2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "owlv2-patch16": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "owlvit-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "paligemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "pegasus": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "pegasus-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "language-perceiver": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.perceiver.tokenization_perceiver.PerceiverTokenizer"
+ }
+ }
+ },
+ "plbart": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.plbart.tokenization_plbart.PLBartTokenizer"
+ }
+ }
+ },
+ "qwen2-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-vl-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "reformer-crime-and-punishment": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.reformer.tokenization_reformer.ReformerTokenizer"
+ }
+ }
+ },
+ "rembert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.rembert.tokenization_rembert.RemBertTokenizer"
+ }
+ }
+ },
+ "roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "efficient-mlm-m0-0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "roc-bert-zh": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roc_bert.tokenization_roc_bert.RoCBertTokenizer"
+ }
+ }
+ },
+ "roformer-chinese": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roformer.tokenization_roformer.RoFormerTokenizer"
+ }
+ }
+ },
+ "rwkv-4-pile": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "hf-seamless-m4t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
+ }
+ }
+ },
+ "seamless-m4t-v2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
+ }
+ }
+ },
+ "siglip-patch16-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.siglip.tokenization_siglip.SiglipTokenizer"
+ }
+ }
+ },
+ "siglip2-patch16-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "s2t-librispeech-asr": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer"
+ }
+ }
+ },
+ "speecht5-asr": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.speecht5.tokenization_speecht5.SpeechT5Tokenizer"
+ }
+ }
+ },
+ "splinter": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.splinter.tokenization_splinter.SplinterTokenizer"
+ }
+ }
+ },
+ "squeezebert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "switch-8": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "t5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "tapas-finetuned-sqa": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.tapas.tokenization_tapas.TapasTokenizer"
+ }
+ }
+ },
+ "tvp": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "udop": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.udop.tokenization_udop.UdopTokenizer"
+ }
+ }
+ },
+ "umt5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "video-llava-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "vilt-b32-mlm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "vip-llava-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "visualbert-vqa-coco-pre": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "mms-tts-eng": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.vits.tokenization_vits.VitsTokenizer"
+ }
+ }
+ },
+ "voxtral-2507": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "wav2vec2-960h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "wav2vec2-bert-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "wav2vec2-conformer-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "whisper": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.whisper.tokenization_whisper.WhisperTokenizer"
+ }
+ }
+ },
+ "xclip-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "xglm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xglm.tokenization_xglm.XGLMTokenizer"
+ }
+ }
+ },
+ "xlm-mlm-en-2048": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm.tokenization_xlm.XLMTokenizer"
+ }
+ }
+ },
+ "xlm-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "xlm-roberta-xl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "xlnet-cased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer"
+ }
+ }
+ },
+ "xlstm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "xmod": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "yoso-4096": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
+ }
+ }
+ },
+ "zamba-v1": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ }
+ },
+ "info.vit.aimv2-patch14-224-lit": {
+ "*": {
+ "repo": "apple/aimv2-large-patch14-224-lit",
+ "pkg": {
+ "0": {
+ "transformers": "Aimv2Model"
+ }
+ }
+ }
+ },
+ "info.art.albert-xx-v2": {
+ "*": {
+ "repo": "albert/albert-xxlarge-v2",
+ "pkg": {
+ "0": {
+ "transformers": "AlbertModel"
+ }
+ }
+ }
+ },
+ "info.vit.align": {
+ "*": {
+ "repo": "kakaobrain/align-base",
+ "pkg": {
+ "0": {
+ "transformers": "AlignModel"
+ }
+ }
+ }
+ },
+ "info.vit.altclip": {
+ "*": {
+ "repo": "BAAI/AltCLIP",
+ "pkg": {
+ "0": {
+ "transformers": "AltCLIPModel"
+ }
+ }
+ }
+ },
+ "info.vit.aria": {
+ "*": {
+ "repo": "rhymes-ai/Aria",
+ "pkg": {
+ "0": {
+ "transformers": "AriaModel"
+ }
+ }
+ }
+ },
+ "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "*": {
+ "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "pkg": {
+ "0": {
+ "transformers": "ASTModel"
+ }
+ }
+ }
+ },
+ "info.stst.audio-flamingo-3-hf": {
+ "*": {
+ "repo": "nvidia/audio-flamingo-3-hf",
+ "pkg": {
+ "0": {
+ "transformers": "AudioFlamingo3ForConditionalGeneration"
+ }
+ }
+ }
+ },
+ "info.aet.audio-flamingo-3-hf": {
+ "*": {
+ "repo": "nvidia/audio-flamingo-3-hf",
+ "pkg": {
+ "0": {
+ "transformers": "AudioFlamingo3Encoder"
+ }
+ }
+ }
+ },
+ "info.stst.autoformer-tourism-monthly": {
+ "*": {
+ "repo": "huggingface/autoformer-tourism-monthly",
+ "pkg": {
+ "0": {
+ "transformers": "AutoformerModel"
+ }
+ }
+ }
+ },
+ "info.vit.aya-vision": {
+ "*": {
+ "repo": "CohereForAI/aya-vision-8b",
+ "pkg": {
+ "0": {
+ "transformers": "AyaVisionModel"
+ }
+ }
+ }
+ },
+ "info.art.bark": {
+ "*": {
+ "repo": "suno/bark",
+ "pkg": {
+ "0": {
+ "transformers": "BarkModel"
+ }
+ }
+ }
+ },
+ "info.stst.bart": {
+ "*": {
+ "repo": "facebook/bart-large",
+ "pkg": {
+ "0": {
+ "transformers": "BartModel"
+ }
+ }
+ }
+ },
+ "info.vit.beit-patch16-224-pt": {
+ "*": {
+ "repo": "microsoft/beit-base-patch16-224-pt22k",
+ "pkg": {
+ "0": {
+ "transformers": "BeitModel"
+ }
+ }
+ }
+ },
+ "info.art.bert-uncased": {
+ "*": {
+ "repo": "google-bert/bert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "BertModel"
+ }
+ }
+ }
+ },
+ "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "*": {
+ "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "pkg": {
+ "0": {
+ "transformers": "BertGenerationEncoder"
+ }
+ }
+ }
+ },
+ "info.art.bigbird-roberta": {
+ "*": {
+ "repo": "google/bigbird-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "BigBirdModel"
+ }
+ }
+ }
+ },
+ "info.stst.bigbird-pegasus-arxiv": {
+ "*": {
+ "repo": "google/bigbird-pegasus-large-arxiv",
+ "pkg": {
+ "0": {
+ "transformers": "BigBirdPegasusModel"
+ }
+ }
+ }
+ },
+ "info.art.biogpt": {
+ "*": {
+ "repo": "microsoft/biogpt",
+ "pkg": {
+ "0": {
+ "transformers": "BioGptModel"
+ }
+ }
+ }
+ },
+ "info.vit.bit-50": {
+ "*": {
+ "repo": "google/bit-50",
+ "pkg": {
+ "0": {
+ "transformers": "BitModel"
+ }
+ }
+ }
+ },
+ "info.stst.blenderbot": {
+ "*": {
+ "repo": "facebook/blenderbot-3B",
+ "pkg": {
+ "0": {
+ "transformers": "BlenderbotModel"
+ }
+ }
+ }
+ },
+ "info.vit.blip-vqa": {
+ "*": {
+ "repo": "Salesforce/blip-vqa-base",
+ "pkg": {
+ "0": {
+ "transformers": "BlipModel"
+ }
+ }
+ }
+ },
+ "info.vit.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2Model"
+ }
+ }
+ }
+ },
+ "info.stst.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2QFormerModel"
+ }
+ }
+ }
+ },
+ "info.art.bloom": {
+ "*": {
+ "repo": "bigscience/bloom",
+ "pkg": {
+ "0": {
+ "transformers": "BloomModel"
+ }
+ }
+ }
+ },
+ "info.vit.bridgetower": {
+ "*": {
+ "repo": "BridgeTower/bridgetower-base",
+ "pkg": {
+ "0": {
+ "transformers": "BridgeTowerModel"
+ }
+ }
+ }
+ },
+ "info.art.bros-uncased": {
+ "*": {
+ "repo": "jinho8345/bros-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "BrosModel"
+ }
+ }
+ }
+ },
+ "info.art.camembert": {
+ "*": {
+ "repo": "almanach/camembert-base",
+ "pkg": {
+ "0": {
+ "transformers": "CamembertModel"
+ }
+ }
+ }
+ },
+ "info.art.canine-s": {
+ "*": {
+ "repo": "google/canine-s",
+ "pkg": {
+ "0": {
+ "transformers": "CanineModel"
+ }
+ }
+ }
+ },
+ "info.vit.chinese-clip-vit-patch16": {
+ "*": {
+ "repo": "OFA-Sys/chinese-clip-vit-base-patch16",
+ "pkg": {
+ "0": {
+ "transformers": "ChineseCLIPModel"
+ }
+ }
+ }
+ },
+ "info.vit.clap-htsat-fused": {
+ "*": {
+ "repo": "laion/clap-htsat-fused",
+ "pkg": {
+ "0": {
+ "transformers": "ClapModel"
+ }
+ }
+ }
+ },
+ "info.vit.clip-vit-patch32": {
+ "*": {
+ "repo": "openai/clip-vit-base-patch32",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPModel"
+ }
+ }
+ }
+ },
+ "info.vit.clipseg-rd64": {
+ "*": {
+ "repo": "CIDAS/clipseg-rd64",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPSegModel"
+ }
+ }
+ }
+ },
+ "info.vit.clvp-dev": {
+ "*": {
+ "repo": "susnato/clvp_dev",
+ "pkg": {
+ "0": {
+ "transformers": "ClvpModelForConditionalGeneration"
+ }
+ }
+ }
+ },
+ "info.art.codegen-mono": {
+ "*": {
+ "repo": "Salesforce/codegen-2B-mono",
+ "pkg": {
+ "0": {
+ "transformers": "CodeGenModel"
+ }
+ }
+ }
+ },
+ "info.vit.command-a-vision-07-2025": {
+ "*": {
+ "repo": "CohereLabs/command-a-vision-07-2025",
+ "pkg": {
+ "0": {
+ "transformers": "Cohere2VisionModel"
+ }
+ }
+ }
+ },
+ "info.detr.conditional-detr-resnet-50": {
+ "*": {
+ "repo": "microsoft/conditional-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "ConditionalDetrModel"
+ }
+ }
+ }
+ },
+ "info.art.conv-bert": {
+ "*": {
+ "repo": "YituTech/conv-bert-base",
+ "pkg": {
+ "0": {
+ "transformers": "ConvBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.convnext-224": {
+ "*": {
+ "repo": "facebook/convnext-tiny-224",
+ "pkg": {
+ "0": {
+ "transformers": "ConvNextModel"
+ }
+ }
+ }
+ },
+ "info.vit.convnextv2-224": {
+ "*": {
+ "repo": "facebook/convnextv2-tiny-1k-224",
+ "pkg": {
+ "0": {
+ "transformers": "ConvNextV2Model"
+ }
+ }
+ }
+ },
+ "info.art.ctrl": {
+ "*": {
+ "repo": "Salesforce/ctrl",
+ "pkg": {
+ "0": {
+ "transformers": "CTRLModel"
+ }
+ }
+ }
+ },
+ "info.vit.cvt-13": {
+ "*": {
+ "repo": "microsoft/cvt-13",
+ "pkg": {
+ "0": {
+ "transformers": "CvtModel"
+ }
+ }
+ }
+ },
+ "info.art.cwm": {
+ "*": {
+ "repo": "facebook/cwm",
+ "pkg": {
+ "0": {
+ "transformers": "CwmModel"
+ }
+ }
+ }
+ },
+ "info.detr.dfine-x-coco": {
+ "*": {
+ "repo": "ustc-community/dfine-xlarge-coco",
+ "pkg": {
+ "0": {
+ "transformers": "DFineModel"
+ }
+ }
+ }
+ },
+ "info.detr.dab-detr": {
+ "*": {
+ "repo": "IDEA-Research/dab-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DabDetrModel"
+ }
+ }
+ }
+ },
+ "info.gan.dac": {
+ "*": {
+ "repo": "descript/dac_16khz",
+ "pkg": {
+ "0": {
+ "transformers": "DacModel"
+ }
+ }
+ }
+ },
+ "info.aet.data2vec-audio-960h": {
+ "*": {
+ "repo": "facebook/data2vec-audio-base-960h",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecAudioModel"
+ }
+ }
+ }
+ },
+ "info.art.data2vec-text": {
+ "*": {
+ "repo": "facebook/data2vec-text-base",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecTextModel"
+ }
+ }
+ }
+ },
+ "info.vit.data2vec-vision": {
+ "*": {
+ "repo": "facebook/data2vec-vision-base",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecVisionModel"
+ }
+ }
+ }
+ },
+ "info.art.deberta": {
+ "*": {
+ "repo": "microsoft/deberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaModel"
+ }
+ }
+ }
+ },
+ "info.art.deberta-v2-x": {
+ "*": {
+ "repo": "microsoft/deberta-v2-xlarge",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaV2Model"
+ }
+ }
+ }
+ },
+ "info.art.decision-transformer-gym-hopper": {
+ "*": {
+ "repo": "edbeeching/decision-transformer-gym-hopper-medium",
+ "pkg": {
+ "0": {
+ "transformers": "DecisionTransformerModel"
+ }
+ }
+ }
+ },
+ "info.detr.deformable-detr": {
+ "*": {
+ "repo": "SenseTime/deformable-detr",
+ "pkg": {
+ "0": {
+ "transformers": "DeformableDetrModel"
+ }
+ }
+ }
+ },
+ "info.vit.deit-distilled-patch16-224": {
+ "*": {
+ "repo": "facebook/deit-base-distilled-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "DeiTModel"
+ }
+ }
+ }
+ },
+ "info.vit.depth": {
+ "*": {
+ "repo": "apple/DepthPro",
+ "pkg": {
+ "0": {
+ "transformers": "DepthProModel"
+ }
+ }
+ }
+ },
+ "info.detr.detr-resnet-50": {
+ "*": {
+ "repo": "facebook/detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DetrModel"
+ }
+ }
+ }
+ },
+ "info.gan.dinat-in-224": {
+ "*": {
+ "repo": "shi-labs/dinat-mini-in1k-224",
+ "pkg": {
+ "0": {
+ "transformers": "DinatModel"
+ }
+ }
+ }
+ },
+ "info.vit.dinov2-patch16-224": {
+ "*": {
+ "repo": "google/dinov2-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Dinov2Model"
+ }
+ }
+ }
+ },
+ "info.vit.dinov2-with-registers": {
+ "*": {
+ "repo": "facebook/dinov2-with-registers-base",
+ "pkg": {
+ "0": {
+ "transformers": "Dinov2WithRegistersModel"
+ }
+ }
+ }
+ },
+ "info.art.distilbert-uncased": {
+ "*": {
+ "repo": "distilbert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "DistilBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.donut": {
+ "*": {
+ "repo": "naver-clova-ix/donut-base",
+ "pkg": {
+ "0": {
+ "transformers": "DonutSwinModel"
+ }
+ }
+ }
+ },
+ "info.art.dpr-ctx-encoder-single-nq": {
+ "*": {
+ "repo": "facebook/dpr-ctx_encoder-single-nq-base",
+ "pkg": {
+ "0": {
+ "transformers": "DPRQuestionEncoder"
+ }
+ }
+ }
+ },
+ "info.detr.dpt": {
+ "*": {
+ "repo": "Intel/dpt-large",
+ "pkg": {
+ "0": {
+ "transformers": "DPTModel"
+ }
+ }
+ }
+ },
+ "info.vit.edgetam1-hiera": {
+ "*": {
+ "repo": "facebook/edgetam.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "EdgeTamModel"
+ }
+ }
+ }
+ },
+ "info.vit.edgetam": {
+ "*": {
+ "repo": "facebook/EdgeTAM",
+ "pkg": {
+ "0": {
+ "transformers": "EdgeTamVideoModel"
+ }
+ }
+ }
+ },
+ "info.art.electra-discriminator": {
+ "*": {
+ "repo": "google/electra-small-discriminator",
+ "pkg": {
+ "0": {
+ "transformers": "ElectraModel"
+ }
+ }
+ }
+ },
+ "info.gan.encodec": {
+ "*": {
+ "repo": "facebook/encodec_24khz",
+ "pkg": {
+ "0": {
+ "transformers": "EncodecModel"
+ }
+ }
+ }
+ },
+ "info.art.ernie-3-zh": {
+ "*": {
+ "repo": "nghuyong/ernie-3.0-base-zh",
+ "pkg": {
+ "0": {
+ "transformers": "ErnieModel"
+ }
+ }
+ }
+ },
+ "info.vit.ernie-4-vl-a-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-VL-28B-A3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5_VL_MoeModel"
+ }
+ }
+ }
+ },
+ "info.aet.esm": {
+ "*": {
+ "repo": "facebook/esm-1b",
+ "pkg": {
+ "0": {
+ "transformers": "EsmModel"
+ }
+ }
+ }
+ },
+ "info.ssm.falcon-mamba": {
+ "*": {
+ "repo": "tiiuae/falcon-mamba-7b",
+ "pkg": {
+ "0": {
+ "transformers": "FalconMambaModel"
+ }
+ }
+ }
+ },
+ "info.vit.fastvlm": {
+ "*": {
+ "repo": "KamilaMila/FastVLM-7B",
+ "pkg": {
+ "0": {
+ "transformers": "FastVlmModel"
+ }
+ }
+ }
+ },
+ "info.aet.fastspeech2-conformer": {
+ "*": {
+ "repo": "espnet/fastspeech2_conformer",
+ "pkg": {
+ "0": {
+ "transformers": "FastSpeech2ConformerModel"
+ }
+ }
+ }
+ },
+ "info.art.flaubert-uncased": {
+ "*": {
+ "repo": "flaubert/flaubert_base_uncased",
+ "pkg": {
+ "0": {
+ "transformers": "FlaubertModel"
+ }
+ }
+ }
+ },
+ "info.vit.florence-2": {
+ "*": {
+ "repo": "florence-community/Florence-2-base",
+ "pkg": {
+ "0": {
+ "transformers": "Florence2Model"
+ }
+ }
+ }
+ },
+ "info.art.fnet": {
+ "*": {
+ "repo": "google/fnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "FNetModel"
+ }
+ }
+ }
+ },
+ "info.vit.focalnet": {
+ "*": {
+ "repo": "microsoft/focalnet-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "FocalNetModel"
+ }
+ }
+ }
+ },
+ "info.stst.wmt19-en-ru": {
+ "*": {
+ "repo": "facebook/wmt19-en-ru",
+ "pkg": {
+ "0": {
+ "transformers": "FSMTModel"
+ }
+ }
+ }
+ },
+ "info.aet.funnel": {
+ "*": {
+ "repo": "funnel-transformer/small",
+ "pkg": {
+ "0": {
+ "transformers": "FunnelModel"
+ }
+ }
+ }
+ },
+ "info.vit.git": {
+ "*": {
+ "repo": "microsoft/git-base",
+ "pkg": {
+ "0": {
+ "transformers": "GitModel"
+ }
+ }
+ }
+ },
+ "info.vit.glm-4v-thinking": {
+ "*": {
+ "repo": "zai-org/GLM-4.1V-9B-Thinking",
+ "pkg": {
+ "0": {
+ "transformers": "Glm46VModel"
+ }
+ }
+ }
+ },
+ "info.vit.glm-4v": {
+ "*": {
+ "repo": "zai-org/GLM-4.5V",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4vMoeModel"
+ }
+ }
+ }
+ },
+ "info.stst.glm-asr-nano-2512": {
+ "*": {
+ "repo": "zai-org/GLM-ASR-Nano-2512",
+ "pkg": {
+ "0": {
+ "transformers": "GlmAsrForConditionalGeneration"
+ }
+ }
+ }
+ },
+ "info.vit.glpn-kitti": {
+ "*": {
+ "repo": "vinvino02/glpn-kitti",
+ "pkg": {
+ "0": {
+ "transformers": "GLPNModel"
+ }
+ }
+ }
+ },
+ "info.art.gpt2": {
+ "*": {
+ "repo": "openai-community/gpt2",
+ "pkg": {
+ "0": {
+ "transformers": "GPT2Model"
+ }
+ }
+ }
+ },
+ "info.art.gpt-bigcode-santacoder": {
+ "*": {
+ "repo": "bigcode/gpt_bigcode-santacoder",
+ "pkg": {
+ "0": {
+ "transformers": "GPTBigCodeModel"
+ }
+ }
+ }
+ },
+ "info.art.gpt-neo": {
+ "*": {
+ "repo": "EleutherAI/gpt-neo-1.3B",
+ "pkg": {
+ "0": {
+ "transformers": "GPTNeoModel"
+ }
+ }
+ }
+ },
+ "info.art.gpt-j": {
+ "*": {
+ "repo": "EleutherAI/gpt-j-6B",
+ "pkg": {
+ "0": {
+ "transformers": "GPTJModel"
+ }
+ }
+ }
+ },
+ "info.vit.llava-v1-mistral-hf": {
+ "*": {
+ "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaNextModel"
+ }
+ }
+ }
+ },
+ "info.detr.grounding-dino": {
+ "*": {
+ "repo": "IDEA-Research/grounding-dino-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "GroundingDinoModel"
+ }
+ }
+ }
+ },
+ "info.vit.groupvit-gcc-yfcc": {
+ "*": {
+ "repo": "nvidia/groupvit-gcc-yfcc",
+ "pkg": {
+ "0": {
+ "transformers": "GroupViTModel"
+ }
+ }
+ }
+ },
+ "info.vit.dfine-x-coco": {
+ "*": {
+ "repo": "ustc-community/dfine_x_coco",
+ "pkg": {
+ "0": {
+ "transformers": "HGNetV2Backbone"
+ }
+ }
+ }
+ },
+ "info.vit.hiera-224": {
+ "*": {
+ "repo": "facebook/hiera-base-224-hf",
+ "pkg": {
+ "0": {
+ "transformers": "HieraModel"
+ }
+ }
+ }
+ },
+ "info.aet.hubert-ls960": {
+ "*": {
+ "repo": "facebook/hubert-base-ls960",
+ "pkg": {
+ "0": {
+ "transformers": "HubertModel"
+ }
+ }
+ }
+ },
+ "info.art.ibert-roberta": {
+ "*": {
+ "repo": "kssteven/ibert-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "IBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.idefics": {
+ "*": {
+ "repo": "HuggingFaceM4/idefics-9b",
+ "pkg": {
+ "0": {
+ "transformers": "IdeficsModel"
+ }
+ }
+ }
+ },
+ "info.vit.idefics2": {
+ "*": {
+ "repo": "HuggingFaceM4/idefics2-8b",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics2Model"
+ }
+ }
+ }
+ },
+ "info.vit.idefics3-llama3": {
+ "*": {
+ "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics3Model"
+ }
+ }
+ }
+ },
+ "info.vit.siglip-patch16-224": {
+ "*": {
+ "repo": "google/siglip-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics3VisionTransformer"
+ }
+ }
+ }
+ },
+ "info.vit.ijepa-vith14": {
+ "*": {
+ "repo": "facebook/ijepa_vith14_1k",
+ "pkg": {
+ "0": {
+ "transformers": "IJepaModel"
+ }
+ }
+ }
+ },
+ "info.art.imagegpt": {
+ "*": {
+ "repo": "openai/imagegpt-small",
+ "pkg": {
+ "0": {
+ "transformers": "ImageGPTModel"
+ }
+ }
+ }
+ },
+ "info.vit.blip-flan-t5": {
+ "*": {
+ "repo": "Salesforce/instructblip-flan-t5-xl",
+ "pkg": {
+ "0": {
+ "transformers": "InstructBlipModel"
+ }
+ }
+ }
+ },
+ "info.vit.internvl3-hf": {
+ "*": {
+ "repo": "OpenGVLab/InternVL3-1B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "InternVLModel"
+ }
+ }
+ }
+ },
+ "info.ssm.jamba-v0": {
+ "*": {
+ "repo": "ai21labs/Jamba-v0.1",
+ "pkg": {
+ "0": {
+ "transformers": "JambaModel"
+ }
+ }
+ }
+ },
+ "info.vit.janus": {
+ "*": {
+ "repo": "deepseek-community/Janus-Pro-1B",
+ "pkg": {
+ "0": {
+ "transformers": "JanusModel"
+ }
+ }
+ }
+ },
+ "info.vit.kosmos-2-patch14-224": {
+ "*": {
+ "repo": "microsoft/kosmos-2-patch14-224",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2Model"
+ }
+ }
+ }
+ },
+ "info.vit.kosmos-2": {
+ "*": {
+ "repo": "microsoft/kosmos-2.5",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2_5Model"
+ }
+ }
+ }
+ },
+ "info.aet.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrForCTC"
+ }
+ }
+ }
+ },
+ "info.stst.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrEncoder"
+ }
+ }
+ }
+ },
+ "info.art.layoutlm-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlm-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMModel"
+ }
+ }
+ }
+ },
+ "info.art.layoutlmv2-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlmv2-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv2Model"
+ }
+ }
+ }
+ },
+ "info.vit.layoutlmv3": {
+ "*": {
+ "repo": "microsoft/layoutlmv3-base",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv3Model"
+ }
+ }
+ }
+ },
+ "info.stst.led-16384": {
+ "*": {
+ "repo": "allenai/led-base-16384",
+ "pkg": {
+ "0": {
+ "transformers": "LEDModel"
+ }
+ }
+ }
+ },
+ "info.gan.levit-128s": {
+ "*": {
+ "repo": "facebook/levit-128S",
+ "pkg": {
+ "0": {
+ "transformers": "LevitModel"
+ }
+ }
+ }
+ },
+ "info.stst.lfm2-a": {
+ "*": {
+ "repo": "LiquidAI/LFM2-8B-A1B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2MoeModel"
+ }
+ }
+ }
+ },
+ "info.vit.lfm2-vl": {
+ "*": {
+ "repo": "LiquidAI/LFM2-VL-1.6B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2VlModel"
+ }
+ }
+ }
+ },
+ "info.aet.lightglue-superpoint": {
+ "*": {
+ "repo": "ETH-CVG/lightglue_superpoint",
+ "pkg": {
+ "0": {
+ "transformers": "LightGlueForKeypointMatching"
+ }
+ }
+ }
+ },
+ "info.art.lilt-roberta-en": {
+ "*": {
+ "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
+ "pkg": {
+ "0": {
+ "transformers": "LiltModel"
+ }
+ }
+ }
+ },
+ "info.vit.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4ForConditionalGeneration"
+ }
+ }
+ }
+ },
+ "info.moe.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4TextModel"
+ }
+ }
+ }
+ },
+ "info.vit.llava": {
+ "*": {
+ "repo": "llava-hf/llava-9b",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaModel"
+ }
+ }
+ }
+ },
+ "info.vit.llava-next-video-hf": {
+ "*": {
+ "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaNextVideoModel"
+ }
+ }
+ }
+ },
+ "info.vit.llava-onevision-qwen2-ov-hf": {
+ "*": {
+ "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaOnevisionModel"
+ }
+ }
+ }
+ },
+ "info.stst.long-t5-local": {
+ "*": {
+ "repo": "google/long-t5-local-base",
+ "pkg": {
+ "0": {
+ "transformers": "LongT5Model"
+ }
+ }
+ }
+ },
+ "info.art.luke": {
+ "*": {
+ "repo": "studio-ousia/luke-base",
+ "pkg": {
+ "0": {
+ "transformers": "LukeModel"
+ }
+ }
+ }
+ },
+ "info.art.lxmert-uncased": {
+ "*": {
+ "repo": "unc-nlp/lxmert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LxmertModel"
+ }
+ }
+ }
+ },
+ "info.stst.m": {
+ "*": {
+ "repo": "facebook/m2m100_418M",
+ "pkg": {
+ "0": {
+ "transformers": "M2M100Model"
+ }
+ }
+ }
+ },
+ "info.ssm.mamba": {
+ "*": {
+ "repo": "state-spaces/mamba-2.8b",
+ "pkg": {
+ "0": {
+ "transformers": "MambaModel"
+ }
+ }
+ }
+ },
+ "info.ssm.mamba2": {
+ "*": {
+ "repo": "AntonV/mamba2-2.7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Mamba2Model"
+ }
+ }
+ }
+ },
+ "info.stst.opus-mt-en-de": {
+ "*": {
+ "repo": "Helsinki-NLP/opus-mt-en-de",
+ "pkg": {
+ "0": {
+ "transformers": "MarianModel"
+ }
+ }
+ }
+ },
+ "info.art.markuplm": {
+ "*": {
+ "repo": "microsoft/markuplm-base",
+ "pkg": {
+ "0": {
+ "transformers": "MarkupLMModel"
+ }
+ }
+ }
+ },
+ "info.vit.swin-patch4-window7-224": {
+ "*": {
+ "repo": "microsoft/swin-tiny-patch4-window7-224",
+ "pkg": {
+ "0": {
+ "transformers": "MaskFormerSwinModel"
+ }
+ }
+ }
+ },
+ "info.stst.mbart-cc25": {
+ "*": {
+ "repo": "facebook/mbart-large-cc25",
+ "pkg": {
+ "0": {
+ "transformers": "MBartModel"
+ }
+ }
+ }
+ },
+ "info.art.megatron-bert-uncased": {
+ "*": {
+ "repo": "nvidia/megatron-bert-uncased-345m",
+ "pkg": {
+ "0": {
+ "transformers": "MegatronBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.metaclip-2-worldwide-huge-quickgelu": {
+ "*": {
+ "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
+ "pkg": {
+ "0": {
+ "transformers": "MetaClip2Model"
+ }
+ }
+ }
+ },
+ "info.vit.mgp-str": {
+ "*": {
+ "repo": "alibaba-damo/mgp-str-base",
+ "pkg": {
+ "0": {
+ "transformers": "MgpstrForSceneTextRecognition"
+ }
+ }
+ }
+ },
+ "info.vit.mistral-3-2503": {
+ "*": {
+ "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+ "pkg": {
+ "0": {
+ "transformers": "Mistral3Model"
+ }
+ }
+ }
+ },
+ "info.vit.mlcd-vit-bigg-patch14-336": {
+ "*": {
+ "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
+ "pkg": {
+ "0": {
+ "transformers": "MLCDVisionModel"
+ }
+ }
+ }
+ },
+ "info.vit.llama-3-vision": {
+ "*": {
+ "repo": "meta-llama/Llama-3.2-11B-Vision",
+ "pkg": {
+ "0": {
+ "transformers": "MllamaModel"
+ }
+ }
+ }
+ },
+ "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "*": {
+ "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "pkg": {
+ "0": {
+ "transformers": "MMGroundingDinoModel"
+ }
+ }
+ }
+ },
+ "info.art.mobilebert-uncased": {
+ "*": {
+ "repo": "google/mobilebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "MobileBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.mobilenet-v1-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v1_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV1Model"
+ }
+ }
+ }
+ },
+ "info.vit.mobilenet-v2-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v2_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV2Model"
+ }
+ }
+ }
+ },
+ "info.vit.mobilevit": {
+ "*": {
+ "repo": "apple/mobilevit-small",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTModel"
+ }
+ }
+ }
+ },
+ "info.vit.mobilevitv2-1": {
+ "*": {
+ "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTV2Model"
+ }
+ }
+ }
+ },
+ "info.aet.modernbert": {
+ "*": {
+ "repo": "answerdotai/ModernBERT-base",
+ "pkg": {
+ "0": {
+ "transformers": "ModernBertModel"
+ }
+ }
+ }
+ },
+ "info.art.mpnet": {
+ "*": {
+ "repo": "microsoft/mpnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "MPNetModel"
+ }
+ }
+ }
+ },
+ "info.art.mpt": {
+ "*": {
+ "repo": "mosaicml/mpt-7b",
+ "pkg": {
+ "0": {
+ "transformers": "MptModel"
+ }
+ }
+ }
+ },
+ "info.art.mra-512-4": {
+ "*": {
+ "repo": "uw-madison/mra-base-512-4",
+ "pkg": {
+ "0": {
+ "transformers": "MraModel"
+ }
+ }
+ }
+ },
+ "info.stst.mt5": {
+ "*": {
+ "repo": "google/mt5-small",
+ "pkg": {
+ "0": {
+ "transformers": "MT5Model"
+ }
+ }
+ }
+ },
+ "info.art.musicgen": {
+ "*": {
+ "repo": "facebook/musicgen-small",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenModel"
+ }
+ }
+ }
+ },
+ "info.art.musicgen-melody": {
+ "*": {
+ "repo": "facebook/musicgen-melody",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenMelodyModel"
+ }
+ }
+ }
+ },
+ "info.stst.mvp": {
+ "*": {
+ "repo": "RUCAIBox/mvp",
+ "pkg": {
+ "0": {
+ "transformers": "MvpModel"
+ }
+ }
+ }
+ },
+ "info.stst.nllb-moe": {
+ "*": {
+ "repo": "facebook/nllb-moe-54b",
+ "pkg": {
+ "0": {
+ "transformers": "NllbMoeModel"
+ }
+ }
+ }
+ },
+ "info.art.nystromformer-512": {
+ "*": {
+ "repo": "uw-madison/nystromformer-512",
+ "pkg": {
+ "0": {
+ "transformers": "NystromformerModel"
+ }
+ }
+ }
+ },
+ "info.detr.omdet-turbo-swin-hf": {
+ "*": {
+ "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "pkg": {
+ "0": {
+ "transformers": "OmDetTurboForObjectDetection"
+ }
+ }
+ }
+ },
+ "info.art.openai-gpt": {
+ "*": {
+ "repo": "openai-community/openai-gpt",
+ "pkg": {
+ "0": {
+ "transformers": "OpenAIGPTModel"
+ }
+ }
+ }
+ },
+ "info.art.opt": {
+ "*": {
+ "repo": "facebook/opt-350m",
+ "pkg": {
+ "0": {
+ "transformers": "OPTModel"
+ }
+ }
+ }
+ },
+ "info.vit.ovis2-hf": {
+ "*": {
+ "repo": "thisisiron/Ovis2-1B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Ovis2Model"
+ }
+ }
+ }
+ },
+ "info.vit.owlv2-patch16": {
+ "*": {
+ "repo": "google/owlv2-base-patch16",
+ "pkg": {
+ "0": {
+ "transformers": "Owlv2Model"
+ }
+ }
+ }
+ },
+ "info.vit.owlvit-patch32": {
+ "*": {
+ "repo": "google/owlvit-base-patch32",
+ "pkg": {
+ "0": {
+ "transformers": "OwlViTModel"
+ }
+ }
+ }
+ },
+ "info.vit.paligemma": {
+ "*": {
+ "repo": "google/paligemma2-3b-mix-224",
+ "pkg": {
+ "0": {
+ "transformers": "PaliGemmaModel"
+ }
+ }
+ }
+ },
+ "info.aet.parakeet-ctc-b": {
+ "*": {
+ "repo": "nvidia/parakeet-ctc-1.1b",
+ "pkg": {
+ "0": {
+ "transformers": "ParakeetForCTC"
+ }
+ }
+ }
+ },
+ "info.stst.parakeet-ctc-b": {
+ "*": {
+ "repo": "nvidia/parakeet-ctc-1.1b",
+ "pkg": {
+ "0": {
+ "transformers": "ParakeetEncoder"
+ }
+ }
+ }
+ },
+ "info.stst.pe-av": {
+ "*": {
+ "repo": "facebook/pe-av-large",
+ "pkg": {
+ "0": {
+ "transformers": "PeAudioModel"
+ }
+ }
+ }
+ },
+ "info.aet.pe-av": {
+ "*": {
+ "repo": "facebook/pe-av-large",
+ "pkg": {
+ "0": {
+ "transformers": "PeAudioVideoModel"
+ }
+ }
+ }
+ },
+ "info.stst.pegasus": {
+ "*": {
+ "repo": "google/pegasus-large",
+ "pkg": {
+ "0": {
+ "transformers": "PegasusModel"
+ }
+ }
+ }
+ },
+ "info.stst.pegasus-x": {
+ "*": {
+ "repo": "google/pegasus-x-large",
+ "pkg": {
+ "0": {
+ "transformers": "PegasusXModel"
+ }
+ }
+ }
+ },
+ "info.vit.language-perceiver": {
+ "*": {
+ "repo": "deepmind/language-perceiver",
+ "pkg": {
+ "0": {
+ "transformers": "PerceiverModel"
+ }
+ }
+ }
+ },
+ "info.vit.perception-lm": {
+ "*": {
+ "repo": "facebook/Perception-LM-1B",
+ "pkg": {
+ "0": {
+ "transformers": "PerceptionLMModel"
+ }
+ }
+ }
+ },
+ "info.vit.pixio-huge": {
+ "*": {
+ "repo": "facebook/pixio-huge",
+ "pkg": {
+ "0": {
+ "transformers": "PixioModel"
+ }
+ }
+ }
+ },
+ "info.stst.plbart": {
+ "*": {
+ "repo": "uclanlp/plbart-base",
+ "pkg": {
+ "0": {
+ "transformers": "PLBartModel"
+ }
+ }
+ }
+ },
+ "info.vit.poolformer-s12": {
+ "*": {
+ "repo": "sail/poolformer_s12",
+ "pkg": {
+ "0": {
+ "transformers": "PoolFormerModel"
+ }
+ }
+ }
+ },
+ "info.vit.pvt-v2-b0": {
+ "*": {
+ "repo": "OpenGVLab/pvt_v2_b0",
+ "pkg": {
+ "0": {
+ "transformers": "PvtV2Model"
+ }
+ }
+ }
+ },
+ "info.vit.qwen2-vl": {
+ "*": {
+ "repo": "Qwen/Qwen2-VL-7B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2_5_VLModel"
+ }
+ }
+ }
+ },
+ "info.aet.qwen2-audio": {
+ "*": {
+ "repo": "Qwen/Qwen2-Audio-7B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2AudioEncoder"
+ }
+ }
+ }
+ },
+ "info.vit.qwen3-vl": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-4B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLModel"
+ }
+ }
+ }
+ },
+ "info.vit.qwen3-vl-a": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLMoeModel"
+ }
+ }
+ }
+ },
+ "info.art.reformer-crime-and-punishment": {
+ "*": {
+ "repo": "google/reformer-crime-and-punishment",
+ "pkg": {
+ "0": {
+ "transformers": "ReformerModel"
+ }
+ }
+ }
+ },
+ "info.vit.regnet-y-040": {
+ "*": {
+ "repo": "facebook/regnet-y-040",
+ "pkg": {
+ "0": {
+ "transformers": "RegNetModel"
+ }
+ }
+ }
+ },
+ "info.art.rembert": {
+ "*": {
+ "repo": "google/rembert",
+ "pkg": {
+ "0": {
+ "transformers": "RemBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.resnet-50": {
+ "*": {
+ "repo": "microsoft/resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "ResNetModel"
+ }
+ }
+ }
+ },
+ "info.art.roberta": {
+ "*": {
+ "repo": "FacebookAI/roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaModel"
+ }
+ }
+ }
+ },
+ "info.art.efficient-mlm-m0-0": {
+ "*": {
+ "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaPreLayerNormModel"
+ }
+ }
+ }
+ },
+ "info.art.roc-bert-zh": {
+ "*": {
+ "repo": "weiweishi/roc-bert-base-zh",
+ "pkg": {
+ "0": {
+ "transformers": "RoCBertModel"
+ }
+ }
+ }
+ },
+ "info.art.roformer-chinese": {
+ "*": {
+ "repo": "junnyu/roformer_chinese_base",
+ "pkg": {
+ "0": {
+ "transformers": "RoFormerModel"
+ }
+ }
+ }
+ },
+ "info.detr.rtdetr-r50vd": {
+ "*": {
+ "repo": "PekingU/rtdetr_r50vd",
+ "pkg": {
+ "0": {
+ "transformers": "RTDetrModel"
+ }
+ }
+ }
+ },
+ "info.detr.rtdetr-r18vd": {
+ "*": {
+ "repo": "PekingU/rtdetr_r18vd",
+ "pkg": {
+ "0": {
+ "transformers": "RTDetrV2Model"
+ }
+ }
+ }
+ },
+ "info.rnn.rwkv-4-pile": {
+ "*": {
+ "repo": "RWKV/rwkv-4-169m-pile",
+ "pkg": {
+ "0": {
+ "transformers": "RwkvModel"
+ }
+ }
+ }
+ },
+ "info.vit.sam-vit-huge": {
+ "*": {
+ "repo": "facebook/sam-vit-huge",
+ "pkg": {
+ "0": {
+ "transformers": "SamModel"
+ }
+ }
+ }
+ },
+ "info.vit.sam2-hiera": {
+ "*": {
+ "repo": "facebook/sam2.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "Sam2Model"
+ }
+ }
+ }
+ },
+ "info.vit.sam3": {
+ "*": {
+ "repo": "facebook/sam3",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3Model"
+ }
+ }
+ }
+ },
+ "info.vit.sam3-tracker1-hiera": {
+ "*": {
+ "repo": "facebook/sam3_tracker.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3TrackerModel"
+ }
+ }
+ }
+ },
+ "info.stst.sam3": {
+ "*": {
+ "repo": "facebook/sam3",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3VideoModel"
+ }
+ }
+ }
+ },
+ "info.vit.sam-hq-vit-h": {
+ "*": {
+ "repo": "sushmanth/sam_hq_vit_h",
+ "pkg": {
+ "0": {
+ "transformers": "SamHQModel"
+ }
+ }
+ }
+ },
+ "info.vit.sam-hq-vit-huge": {
+ "*": {
+ "repo": "syscv-community/sam-hq-vit-huge",
+ "pkg": {
+ "0": {
+ "transformers": "SamHQVisionModel"
+ }
+ }
+ }
+ },
+ "info.aet.hf-seamless-m4t": {
+ "*": {
+ "repo": "facebook/hf-seamless-m4t-medium",
+ "pkg": {
+ "0": {
+ "transformers": "SeamlessM4TModel"
+ }
+ }
+ }
+ },
+ "info.stst.seamless-m4t-v2": {
+ "*": {
+ "repo": "facebook/seamless-m4t-v2-large",
+ "pkg": {
+ "0": {
+ "transformers": "SeamlessM4Tv2Model"
+ }
+ }
+ }
+ },
+ "info.vit.segformer-b0-finetuned-ade-512-512": {
+ "*": {
+ "repo": "nvidia/segformer-b0-finetuned-ade-512-512",
+ "pkg": {
+ "0": {
+ "transformers": "SegformerModel"
+ }
+ }
+ }
+ },
+ "info.vit.seggpt-vit": {
+ "*": {
+ "repo": "BAAI/seggpt-vit-large",
+ "pkg": {
+ "0": {
+ "transformers": "SegGptModel"
+ }
+ }
+ }
+ },
+ "info.aet.sew": {
+ "*": {
+ "repo": "asapp/sew-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWModel"
+ }
+ }
+ }
+ },
+ "info.aet.sew-d": {
+ "*": {
+ "repo": "asapp/sew-d-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWDModel"
+ }
+ }
+ }
+ },
+ "info.vit.siglip2-patch16-224": {
+ "*": {
+ "repo": "google/siglip2-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Siglip2Model"
+ }
+ }
+ }
+ },
+ "info.vit.siglip2-patch16-naflex": {
+ "*": {
+ "repo": "google/siglip2-base-patch16-naflex",
+ "pkg": {
+ "0": {
+ "transformers": "Siglip2VisionModel"
+ }
+ }
+ }
+ },
+ "info.vit.smolvlm": {
+ "*": {
+ "repo": "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "SmolVLMModel"
+ }
+ }
+ }
+ },
+ "info.vit.siglip-so-patch14-384": {
+ "*": {
+ "repo": "google/siglip-so400m-patch14-384",
+ "pkg": {
+ "0": {
+ "transformers": "SmolVLMVisionTransformer"
+ }
+ }
+ }
+ },
+ "info.aet.s2t-librispeech-asr": {
+ "*": {
+ "repo": "facebook/s2t-small-librispeech-asr",
+ "pkg": {
+ "0": {
+ "transformers": "Speech2TextModel"
+ }
+ }
+ }
+ },
+ "info.stst.speecht5-asr": {
+ "*": {
+ "repo": "microsoft/speecht5_asr",
+ "pkg": {
+ "0": {
+ "transformers": "SpeechT5Model"
+ }
+ }
+ }
+ },
+ "info.art.splinter": {
+ "*": {
+ "repo": "tau/splinter-base",
+ "pkg": {
+ "0": {
+ "transformers": "SplinterModel"
+ }
+ }
+ }
+ },
+ "info.art.squeezebert-uncased": {
+ "*": {
+ "repo": "squeezebert/squeezebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "SqueezeBertModel"
+ }
+ }
+ }
+ },
+ "info.vit.swiftformer-xs": {
+ "*": {
+ "repo": "MBZUAI/swiftformer-xs",
+ "pkg": {
+ "0": {
+ "transformers": "SwiftFormerModel"
+ }
+ }
+ }
+ },
+ "info.vit.swin2sr-classicalsr-x2-64": {
+ "*": {
+ "repo": "caidas/swin2sr-classicalsr-x2-64",
+ "pkg": {
+ "0": {
+ "transformers": "Swin2SRModel"
+ }
+ }
+ }
+ },
+ "info.vit.swinv2-patch4-window8-256": {
+ "*": {
+ "repo": "microsoft/swinv2-tiny-patch4-window8-256",
+ "pkg": {
+ "0": {
+ "transformers": "Swinv2Model"
+ }
+ }
+ }
+ },
+ "info.stst.switch-8": {
+ "*": {
+ "repo": "google/switch-base-8",
+ "pkg": {
+ "0": {
+ "transformers": "SwitchTransformersModel"
+ }
+ }
+ }
+ },
+ "info.stst.t5": {
+ "*": {
+ "repo": "google-t5/t5-small",
+ "pkg": {
+ "0": {
+ "transformers": "T5Model"
+ }
+ }
+ }
+ },
+ "info.detr.table-transformer-detection": {
+ "*": {
+ "repo": "microsoft/table-transformer-detection",
+ "pkg": {
+ "0": {
+ "transformers": "TableTransformerModel"
+ }
+ }
+ }
+ },
+ "info.art.tapas-finetuned-sqa": {
+ "*": {
+ "repo": "google/tapas-base-finetuned-sqa",
+ "pkg": {
+ "0": {
+ "transformers": "TapasModel"
+ }
+ }
+ }
+ },
+ "info.vit.textnet": {
+ "*": {
+ "repo": "czczup/textnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "TextNetModel"
+ }
+ }
+ }
+ },
+ "info.vit.timesformer-finetuned-k600": {
+ "*": {
+ "repo": "facebook/timesformer-base-finetuned-k600",
+ "pkg": {
+ "0": {
+ "transformers": "TimesformerModel"
+ }
+ }
+ }
+ },
+ "info.detr.tvp": {
+ "*": {
+ "repo": "Intel/tvp-base",
+ "pkg": {
+ "0": {
+ "transformers": "TvpModel"
+ }
+ }
+ }
+ },
+ "info.vit.udop": {
+ "*": {
+ "repo": "microsoft/udop-large",
+ "pkg": {
+ "0": {
+ "transformers": "UdopModel"
+ }
+ }
+ }
+ },
+ "info.stst.umt5": {
+ "*": {
+ "repo": "google/umt5-small",
+ "pkg": {
+ "0": {
+ "transformers": "UMT5Model"
+ }
+ }
+ }
+ },
+ "info.aet.unispeech-1500h-cv": {
+ "*": {
+ "repo": "microsoft/unispeech-large-1500h-cv",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechModel"
+ }
+ }
+ }
+ },
+ "info.aet.unispeech-sat-100h-libri-ft": {
+ "*": {
+ "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechSatModel"
+ }
+ }
+ }
+ },
+ "info.gan.univnet-dev": {
+ "*": {
+ "repo": "dg845/univnet-dev",
+ "pkg": {
+ "0": {
+ "transformers": "UnivNetModel"
+ }
+ }
+ }
+ },
+ "info.vit.videollama3-image-hf": {
+ "*": {
+ "repo": "lkhl/VideoLLaMA3-2B-Image-HF",
+ "pkg": {
+ "0": {
+ "transformers": "VideoLlama3Model"
+ }
+ }
+ }
+ },
+ "info.vit.video-llava-hf": {
+ "*": {
+ "repo": "LanguageBind/Video-LLaVA-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "VideoLlavaModel"
+ }
+ }
+ }
+ },
+ "info.vit.videomae": {
+ "*": {
+ "repo": "MCG-NJU/videomae-base",
+ "pkg": {
+ "0": {
+ "transformers": "VideoMAEModel"
+ }
+ }
+ }
+ },
+ "info.vit.vilt-b32-mlm": {
+ "*": {
+ "repo": "dandelin/vilt-b32-mlm",
+ "pkg": {
+ "0": {
+ "transformers": "ViltModel"
+ }
+ }
+ }
+ },
+ "info.vit.vip-llava-hf": {
+ "*": {
+ "repo": "ybelkada/vip-llava-7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "VipLlavaModel"
+ }
+ }
+ }
+ },
+ "info.vit.japanese-clip-vit-h-14-bert-wider": {
+ "*": {
+ "repo": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider",
+ "pkg": {
+ "0": {
+ "transformers": "VisionTextDualEncoderModel"
+ }
+ }
+ }
+ },
+ "info.art.visualbert-vqa-coco-pre": {
+ "*": {
+ "repo": "uclanlp/visualbert-vqa-coco-pre",
+ "pkg": {
+ "0": {
+ "transformers": "VisualBertModel"
}
}
- },
- "bert-for-seq-generation-l-24-bbc-encoder": {
+ }
+ },
+ "info.vit.vit-patch16-224": {
+ "*": {
+ "repo": "google/vit-base-patch16-224",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "20": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "21": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "22": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViTModel"
}
}
- },
- "grounding-dino": {
+ }
+ },
+ "info.vit.vit-mae": {
+ "*": {
+ "repo": "facebook/vit-mae-base",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViTMAEModel"
}
}
- },
- "xlm-roberta-xl": {
+ }
+ },
+ "info.vit.vit-msn": {
+ "*": {
+ "repo": "facebook/vit-msn-base",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViTMSNModel"
}
}
}
},
- "info.aet.funnel": {
+ "info.vit.vitdet-patch16-224": {
"*": {
- "repo": "funnel-transformer/small",
+ "repo": "google/vitdet-base-patch16-224",
"pkg": {
"0": {
- "transformers": "FunnelModel"
+ "transformers": "VitDetModel"
}
}
}
},
- "info.stst.nllb-moe": {
+ "info.art.mms-tts-eng": {
"*": {
- "repo": "facebook/nllb-moe-54b",
+ "repo": "facebook/mms-tts-eng",
"pkg": {
"0": {
- "transformers": "NllbMoeModel"
+ "transformers": "VitsModel"
}
}
}
},
- "info.art.deberta-v2-x": {
+ "info.vit.vivit16x2-kinetics400": {
"*": {
- "repo": "microsoft/deberta-v2-xlarge",
+ "repo": "google/vivit-b-16x2-kinetics400",
"pkg": {
"0": {
- "transformers": "DebertaV2Model"
+ "transformers": "VivitModel"
}
}
}
},
- "info.art.xlm-roberta": {
+ "info.vit.vjepa2-vitl-fpc64-256": {
"*": {
- "repo": "FacebookAI/xlm-roberta-base",
+ "repo": "facebook/vjepa2-vitl-fpc64-256",
"pkg": {
"0": {
- "transformers": "XLMRobertaModel"
+ "transformers": "VJEPA2Model"
}
}
}
},
- "info.art.gpt2": {
+ "info.stst.voxtral-2507": {
"*": {
- "repo": "openai-community/gpt2",
+ "repo": "mistralai/Voxtral-Mini-3B-2507",
"pkg": {
"0": {
- "transformers": "GPT2Model"
+ "transformers": "VoxtralForConditionalGeneration"
}
}
}
},
- "info.art.megatron-bert-uncased": {
+ "info.aet.voxtral-2507": {
"*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
+ "repo": "mistralai/Voxtral-Mini-3B-2507",
"pkg": {
"0": {
- "transformers": "MegatronBertModel"
+ "transformers": "VoxtralEncoder"
}
}
}
},
- "info.stst.blenderbot": {
+ "info.aet.wav2vec2-960h": {
"*": {
- "repo": "facebook/blenderbot_small-90M",
+ "repo": "facebook/wav2vec2-base-960h",
"pkg": {
"0": {
- "transformers": "BlenderbotSmallModel"
+ "transformers": "Wav2Vec2Model"
}
}
}
},
- "info.detr.omdet-turbo-swin-hf": {
+ "info.aet.wav2vec2-bert-rel-pos": {
"*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "repo": "facebook/w2v-bert-2.0",
"pkg": {
"0": {
- "transformers": "OmDetTurboForObjectDetection"
+ "transformers": "Wav2Vec2BertModel"
}
}
}
},
- "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "info.aet.wav2vec2-conformer-rel-pos": {
"*": {
- "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "repo": "facebook/wav2vec2-conformer-rel-pos-large",
"pkg": {
"0": {
- "transformers": "ASTModel"
+ "transformers": "Wav2Vec2ConformerModel"
}
}
}
},
- "info.vit.mgp-str": {
+ "info.aet.wavlm": {
"*": {
- "repo": "alibaba-damo/mgp-str-base",
+ "repo": "microsoft/wavlm-base",
"pkg": {
"0": {
- "transformers": "MgpstrForSceneTextRecognition"
+ "transformers": "WavLMModel"
}
}
}
},
- "info.vit.blip2-opt": {
+ "info.aet.whisper": {
"*": {
- "repo": "Salesforce/blip2-opt-2.7b",
+ "repo": "openai/whisper-tiny",
"pkg": {
"0": {
- "transformers": "Blip2Model"
+ "transformers": "WhisperModel"
}
}
}
},
- "info.art.efficient-mlm-m0-0": {
+ "info.vit.xclip-patch32": {
"*": {
- "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "repo": "microsoft/xclip-base-patch32",
"pkg": {
"0": {
- "transformers": "RobertaPreLayerNormModel"
+ "transformers": "XCLIPModel"
}
}
}
},
- "info.aet.wav2vec2-conformer-rel-pos": {
+ "info.art.xglm": {
"*": {
- "repo": "facebook/wav2vec2-conformer-rel-pos-large",
+ "repo": "facebook/xglm-564M",
"pkg": {
"0": {
- "transformers": "Wav2Vec2ConformerModel"
+ "transformers": "XGLMModel"
}
}
}
},
- "info.aet.unispeech-sat-100h-libri-ft": {
+ "info.art.xlm-mlm-en-2048": {
"*": {
- "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "repo": "FacebookAI/xlm-mlm-en-2048",
"pkg": {
"0": {
- "transformers": "UniSpeechSatModel"
+ "transformers": "XLMModel"
}
}
}
},
- "info.detr.table-transformer-detection": {
+ "info.art.xlm-roberta": {
"*": {
- "repo": "microsoft/table-transformer-detection",
+ "repo": "FacebookAI/xlm-roberta-base",
"pkg": {
"0": {
- "transformers": "TableTransformerModel"
+ "transformers": "XLMRobertaModel"
}
}
}
},
- "info.detr.dab-detr": {
+ "info.art.xlm-roberta-xl": {
"*": {
- "repo": "IDEA-Research/dab-detr-resnet-50",
+ "repo": "facebook/xlm-roberta-xl",
"pkg": {
"0": {
- "transformers": "DabDetrModel"
+ "transformers": "XLMRobertaXLModel"
}
}
}
},
- "info.aet.wav2vec2-bert-rel-pos": {
+ "info.art.xlnet-cased": {
"*": {
- "repo": "facebook/w2v-bert-2.0",
+ "repo": "xlnet/xlnet-large-cased",
"pkg": {
"0": {
- "transformers": "Wav2Vec2BertModel"
+ "transformers": "XLNetModel"
}
}
}
},
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "info.lstm.xlstm": {
"*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "repo": "NX-AI/xLSTM-7b",
"pkg": {
"0": {
- "transformers": "MMGroundingDinoModel"
+ "transformers": "xLSTMModel"
}
}
}
},
- "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "info.art.xmod": {
"*": {
- "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "repo": "facebook/xmod-base",
"pkg": {
"0": {
- "transformers": "BertGenerationEncoder"
+ "transformers": "XmodModel"
}
}
}
},
- "info.detr.grounding-dino": {
+ "info.cnn.yolos": {
"*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
+ "repo": "hustvl/yolos-base",
"pkg": {
"0": {
- "transformers": "GroundingDinoModel"
+ "transformers": "YolosModel"
}
}
}
},
- "info.art.xlm-roberta-xl": {
+ "info.art.yoso-4096": {
"*": {
- "repo": "facebook/xlm-roberta-xl",
+ "repo": "uw-madison/yoso-4096",
"pkg": {
"0": {
- "transformers": "XLMRobertaXLModel"
+ "transformers": "YosoModel"
}
}
}
},
- "info.aet.sew-d": {
+ "info.ssm.zamba-v1": {
"*": {
- "repo": "asapp/sew-d-tiny-100k",
+ "repo": "Zyphra/Zamba-7B-v1",
"pkg": {
"0": {
- "transformers": "SEWDModel"
+ "transformers": "ZambaModel"
}
}
}
@@ -4389,6 +6846,111 @@
]
}
},
+ "info.dit.flux1-dev": {
+ "mystic": {
+ "repo": "enhanceaiteam/Mystic",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 16,
+ "guidance_scale": 7.5,
+ "width": 768,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
+ "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
+ ],
+ "layer_256": [
+ "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
+ ],
+ "layer_b3": [
+ "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
+ ]
+ },
+ "flux1-lite": {
+ "repo": "freepik/flux.1-lite-8b",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ },
+ "file_256": [
+ "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
+ "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
+ ],
+ "layer_256": [
+ "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
+ ],
+ "layer_b3": [
+ "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
+ ]
+ },
+ "f-lite": {
+ "repo": "freepik/f-lite",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "f-lite-texture": {
+ "repo": "freepik/f-lite-texture",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "flux": {
+ "repo": "TencentARC/flux-mini",
+ "file_256": [
+ "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
+ ],
+ "layer_256": [
+ "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
+ ],
+ "layer_b3": [
+ "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
+ ]
+ },
+ "flex2": {
+ "repo": "ostris/Flex.2-preview",
+ "file_256": [
+ "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
+ "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
+ ],
+ "layer_256": [
+ "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
+ ],
+ "layer_b3": [
+ "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
+ ]
+ },
+ "flex1-alpha": {
+ "repo": "ostris/Flex.1-alpha",
+ "file_256": [
+ "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
+ "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
+ ],
+ "layer_256": [
+ "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
+ ],
+ "layer_b3": [
+ "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
+ ]
+ }
+ },
"info.dit.wan2-flf2v-720p": {
"diffusers": {
"repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
diff --git a/mir/spec/template.json b/mir/spec/template.json
index 1381e19..8479db5 100644
--- a/mir/spec/template.json
+++ b/mir/spec/template.json
@@ -80,6 +80,7 @@
"act_dropout",
"max_source_positions",
"classifier_pooling",
+ "ctc_loss_reduction",
"audio_video_config",
"video_config"
],
diff --git a/mir/tag.py b/mir/tag.py
index 7b272fe..fc95b7a 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -94,9 +94,12 @@ def tag_base_model(repo_path: str, class_name: str, addendum: dict | None = None
:param class_name: The HF transformers class for the model
:return: A segmented MIR tag useful for appending index entries"""
- from mir.inspect.classes import extract_init_params
+ from mir.config.constants import extract_init_params
- annotations = extract_init_params(class_name.replace("Model", "Config"), "transformers") # remove default annotations from python
+ annotations = extract_init_params(class_name.replace("Model", "Config"), "transformers")
+ if not annotations:
+ class_name = class_name.replace("Config", "Model")
+ annotations = extract_init_params(class_name, "transformers")
if not annotations:
raise TypeError("No mode type returned")
mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
From fb8eebe9b6a8ea979b81cf093de00631a0458acf Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sun, 11 Jan 2026 23:06:29 -0500
Subject: [PATCH 04/16] ~mir gen complete
---
mir.json | 10880 +++++++++++++++++++++++++++------
mir/config/constants.py | 31 +-
mir/indexers.py | 11 +-
mir/inspect/metadata.py | 2 +-
mir/inspect/pipes.py | 4 +-
mir/mir.json | 8600 ++++++++++++++++++++++++--
mir/spec/missing_params.json | 13 +-
mir/spec/template.json | 9 +-
mir/tag.py | 6 +-
9 files changed, 17019 insertions(+), 2537 deletions(-)
diff --git a/mir.json b/mir.json
index c73a611..c897555 100644
--- a/mir.json
+++ b/mir.json
@@ -49,6 +49,16 @@
}
}
},
+ "info.controlnet.stable-diffusion-xl-1": {
+ "*": {
+ "repo": "stabilityai/stable-diffusion-xl-base-1.0",
+ "pkg": {
+ "0": {
+ "diffusers": "StableDiffusionXLControlNetUnionInpaintPipeline"
+ }
+ }
+ }
+ },
"info.controlnet.controlnet-union-sdxl-1": {
"*": {
"repo": "xinsir/controlnet-union-sdxl-1.0",
@@ -99,6 +109,105 @@
}
}
},
+ "info.unet.marigold-depth-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-depth-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldDepthPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-depth-v1-1"
+ ]
+ }
+ }
+ },
+ "info.unet.marigold-iid-appearance-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-iid-appearance-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldIntrinsicsPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-iid-appearance-v1-1"
+ ]
+ }
+ }
+ },
+ "info.unet.marigold-normals-v1-1": {
+ "*": {
+ "repo": "prs-eth/marigold-normals-v1-1",
+ "pkg": {
+ "0": {
+ "diffusers": "MarigoldNormalsPipeline"
+ }
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-normals-v1-1"
+ ]
+ }
+ }
+ },
"info.unet.stable-diffusion-v1-5": {
"*": {
"repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
@@ -107,6 +216,32 @@
"diffusers": "StableDiffusionPipeline"
}
},
+ "identifiers": [
+ "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
+ ],
+ "file_256": [
+ "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
+ "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
+ "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
+ "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
+ "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
+ "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
+ "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
+ ],
+ "layer_b3": [
+ "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
+ "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
+ "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
+ "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
+ ],
+ "layer_256": [
+ "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
+ "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
+ "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
+ ],
"tasks": [
"StableDiffusion3ControlNetInpaintingPipeline",
"StableDiffusion3ControlNetPipeline",
@@ -146,7 +281,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -161,10 +296,11 @@
"StableDiffusionSafetyChecker"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
]
}
@@ -218,7 +354,7 @@
"stable-unclip-2-1-l"
],
"prior_text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"prior": [
@@ -229,7 +365,8 @@
"schedulers"
],
"image_normalizer": [
- "StableUnCLIPImageNormalizer"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_noising_scheduler": [
"ops.scheduler.karrasdiffusion",
@@ -240,7 +377,7 @@
"stable-unclip-2-1-l"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"scheduler": [
@@ -297,14 +434,16 @@
],
"pipe_names": {
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"image_normalizer": [
- "StableUnCLIPImageNormalizer"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_noising_scheduler": [
"ops.scheduler.karrasdiffusion",
@@ -315,7 +454,7 @@
"stable-diffusion-2-1-unclip"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"scheduler": [
@@ -333,59 +472,42 @@
"repo": "stabilityai/stable-diffusion-xl-base-1.0",
"pkg": {
"0": {
- "diffusers": "StableDiffusionXLPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "denoising_end": 0.8,
+ "num_inference_steps": 40,
+ "output_type": "latent",
+ "safety_checker": false,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
}
},
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
+ "file_256": [
+ "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
+ "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
+ "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
+ "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
],
- "pipe_names": {
- "vae": [
- "info.vae.eq",
- "info.vae.ms-lc-eq",
- "stable-diffusion-xl-1"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-1"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-1"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- }
+ "layer_256": [
+ "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
+ "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
+ "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
+ ],
+ "layer_b3": [
+ "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
+ "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
+ "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
+ ],
+ "identifiers": [
+ "logit_scale",
+ "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
+ "add_embedding.linear_2.bias"
+ ],
+ "pipe_names": {}
},
"pony-diffusion": {
"file_256": [
@@ -543,8 +665,33 @@
"pkg": {
"0": {
"diffusers": "StableDiffusionXLImg2ImgPipeline"
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "num_inference_steps": 40,
+ "denoising_end": 0.8
+ }
}
},
+ "identifiers": [
+ "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
+ ],
+ "file_256": [
+ "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
+ "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
+ "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
+ ],
+ "layer_b3": [
+ "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
+ "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
+ "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
+ ],
+ "layer_256": [
+ "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
+ "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
+ "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
+ ],
"tasks": [
"StableDiffusionXLControlNetImg2ImgPipeline",
"StableDiffusionXLControlNetInpaintPipeline",
@@ -566,11 +713,11 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -586,11 +733,12 @@
"schedulers"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -624,11 +772,11 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -651,29 +799,23 @@
"repo": "rhymes-ai/Allegro",
"pkg": {
"0": {
- "diffusers": "AllegroPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "guidance_scale": 7.5,
+ "max_sequence_length": 512,
+ "num_inference_steps": 100
+ }
}
},
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "allegro"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "allegro"
- ],
- "transformer": [
- "AllegroTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
+ "file_256": [
+ "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
+ ],
+ "layer_b3": [
+ "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
+ ],
+ "layer_256": [
+ "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
+ ]
}
},
"info.dit.amused-512": {
@@ -693,7 +835,7 @@
"amused-512"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"transformer": [
@@ -716,6 +858,16 @@
}
}
},
+ "info.lora.animatelcm": {
+ "*": {
+ "repo": "wangfuyun/AnimateLCM",
+ "pkg": {
+ "0": {
+ "diffusers": "MotionAdapter"
+ }
+ }
+ }
+ },
"info.lora.animatediff-motion-adapter-sdxl": {
"*": {
"repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
@@ -772,18 +924,20 @@
"AutoencoderKL"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"bria-3"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -806,7 +960,8 @@
"audioldm-s-v2"
],
"text_encoder": [
- "Mistral3ForConditionalGeneration"
+ "info.vit.mistral-3-2503",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -823,70 +978,69 @@
"repo": "black-forest-labs/FLUX.1-schnell",
"pkg": {
"0": {
- "diffusers": "FluxInpaintPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 0.0,
+ "num_inference_steps": 4,
+ "max_sequence_length": 256
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "num_inference_steps": 4
+ }
}
},
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ],
+ "file_256": [
+ "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
+ "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
+ ],
+ "layer_b3": [
+ "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
+ "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
+ "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
+ ],
+ "layer_256": [
+ "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
+ "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
+ "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
+ ],
"tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-schnell"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-schnell"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- }
- },
- "shuttle-3-aesthetic": {
- "repo": "shuttleai/shuttle-3.1-aesthetic",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
- "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
+ "Image",
+ "Redux",
+ "Kontext",
+ "Depth",
+ "Fill",
+ "ConceptAttention",
+ "ControlNet",
+ "CavTon",
+ "IC-Edit"
+ ]
+ },
+ "shuttle-3-aesthetic": {
+ "repo": "shuttleai/shuttle-3.1-aesthetic",
+ "pkg": {
+ "2": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "guidance_scale": 3.5,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "file_256": [
+ "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
+ "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
],
"layer_256": [
"e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
@@ -956,6 +1110,16 @@
}
}
},
+ "info.controlnet.flux1-depth-dev": {
+ "*": {
+ "repo": "black-forest-labs/FLUX.1-Depth-dev",
+ "pkg": {
+ "0": {
+ "diffusers": "FluxControlInpaintPipeline"
+ }
+ }
+ }
+ },
"info.controlnet.flux1-dev-controlnet-canny": {
"*": {
"repo": "InstantX/FLUX.1-dev-controlnet-canny",
@@ -1005,7 +1169,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -1013,7 +1177,8 @@
"flux1-fill-dev"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
@@ -1054,7 +1219,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -1062,7 +1227,8 @@
"flux1-kontext-dev"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
@@ -1072,172 +1238,14 @@
"FluxTransformer2DModel"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
- ]
- }
- }
- },
- "info.dit.flux1-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxPipeline"
- }
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-dev"
- ],
- "text_encoder_2": [
- "T5EncoderModel"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.dit.flux1-schnell",
"*"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
]
}
- },
- "mystic": {
- "repo": "enhanceaiteam/Mystic",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 16,
- "guidance_scale": 7.5,
- "width": 768,
- "height": 1024
- }
- }
- },
- "file_256": [
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
- ],
- "layer_256": [
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
- ],
- "layer_b3": [
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
- ]
- },
- "flux1-lite": {
- "repo": "freepik/flux.1-lite-8b",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 28
- }
- }
- },
- "file_256": [
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
- ],
- "layer_256": [
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
- ],
- "layer_b3": [
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
- ]
- },
- "f-lite": {
- "repo": "freepik/f-lite",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "f-lite-texture": {
- "repo": "freepik/f-lite-texture",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "flux": {
- "repo": "TencentARC/flux-mini",
- "file_256": [
- "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
- ],
- "layer_256": [
- "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
- ],
- "layer_b3": [
- "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
- ]
- },
- "flex2": {
- "repo": "ostris/Flex.2-preview",
- "file_256": [
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
- ],
- "layer_256": [
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
- ],
- "layer_b3": [
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
- ]
- },
- "flex1-alpha": {
- "repo": "ostris/Flex.1-alpha",
- "file_256": [
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
- ],
- "layer_256": [
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
- ],
- "layer_b3": [
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
- ]
}
},
"info.dit.prx-512-t2i-sft": {
@@ -1257,7 +1265,8 @@
"discrete"
],
"text_encoder": [
- "T5GemmaEncoder"
+ "info.stst.t5gemma-prefixlm",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1282,12 +1291,22 @@
"diffusers": "AudioLDMPipeline"
}
},
+ "file_256": [
+ "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ],
"pipe_names": {
"vae": [
"AutoencoderKL"
],
"text_encoder": [
- "ClapTextModelWithProjection"
+ "info.vit.clap-htsat-fused",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1298,7 +1317,8 @@
"schedulers"
],
"vocoder": [
- "SpeechT5HifiGan"
+ "info.stst.speecht5-asr",
+ "*"
]
}
}
@@ -1308,49 +1328,22 @@
"repo": "cvssp/audioldm2",
"pkg": {
"0": {
- "diffusers": "AudioLDM2Pipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
}
},
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ClapModel"
- ],
- "text_encoder_2": [
- "T5EncoderModel",
- "VitsModel"
- ],
- "projection_model": [
- "AudioLDM2ProjectionModel"
- ],
- "language_model": [
- "info.art.gpt2",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "audioldm2"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "audioldm2"
- ],
- "feature_extractor": [
- "ClapFeatureExtractor"
- ],
- "unet": [
- "AudioLDM2UNet2DConditionModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "SpeechT5HifiGan"
- ]
- }
+ "file_256": [
+ "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
+ ],
+ "layer_b3": [
+ "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
+ ],
+ "layer_256": [
+ "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
+ ]
}
},
"info.unet.blipdiffusion": {
@@ -1367,7 +1360,7 @@
"blipdiffusion"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"vae": [
@@ -1382,7 +1375,8 @@
"*"
],
"image_processor": [
- "BlipImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -1393,8 +1387,30 @@
"pkg": {
"0": {
"diffusers": "ChromaPipeline"
+ },
+ "1": {
+ "generation": {
+ "neg_text": "",
+ "num_steps": "28",
+ "latent_size": [
+ 64,
+ 64
+ ]
+ }
}
},
+ "file_256": [
+ "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
+ "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
+ "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
+ "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
+ ],
+ "layer_b3": [
+ "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
+ ],
+ "layer_256": [
+ "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
+ ],
"tasks": [
"ChromaPipeline"
],
@@ -1407,7 +1423,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1417,11 +1434,12 @@
"ChromaTransformer2DModel"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
},
@@ -1469,7 +1487,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1479,11 +1498,12 @@
"ChromaTransformer2DModel"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -1508,7 +1528,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1518,11 +1539,12 @@
"ChromaTransformer2DModel"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -1532,35 +1554,24 @@
"repo": "zai-org/CogVideoX-2b",
"pkg": {
"0": {
- "diffusers": "CogVideoXPipeline"
- }
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_videos_per_prompt": 1,
+ "num_inference_steps": 50,
+ "num_frames": 49,
+ "guidance_scale": 6
+ }
+ }
},
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogvideox"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "transformer": [
- "CogVideoXTransformer3DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- }
+ "file_256": [
+ "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
+ ],
+ "layer_b3": [
+ "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
+ ],
+ "layer_256": [
+ "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
+ ]
}
},
"info.controlnet.cogvideox-fun-v-pose": {
@@ -1587,7 +1598,8 @@
"cogvideox-i2v"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"vae": [
"info.vae.cogvideox",
@@ -1614,36 +1626,15 @@
"repo": "zai-org/CogView3-Plus-3B",
"pkg": {
"0": {
- "diffusers": "CogView3PlusPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "guidance_scale": 7.0,
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
}
- },
- "tasks": [
- "CogView3PlusPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogview3"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "CogView3PlusTransformer2DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
}
}
},
@@ -1665,7 +1656,8 @@
"cogview4"
],
"text_encoder": [
- "GlmModel"
+ "info.stst.glm-4-chat",
+ "*"
],
"vae": [
"AutoencoderKL"
@@ -1700,7 +1692,8 @@
},
"pipe_names": {
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1731,9 +1724,55 @@
"diffusers": "Cosmos2TextToImagePipeline"
}
},
+ "file_256": [
+ "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
+ "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
+ "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
+ "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
+ "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
+ "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
+ "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
+ "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
+ "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
+ "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
+ "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
+ "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
+ "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
+ ],
+ "layer_b3": [
+ "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
+ "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
+ "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
+ "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
+ "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
+ "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
+ "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
+ "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
+ "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
+ "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
+ "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
+ "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
+ "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
+ ],
+ "layer_256": [
+ "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
+ "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
+ "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
+ "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
+ "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
+ "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
+ "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
+ "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
+ "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
+ "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
+ "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
+ "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
+ "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
+ ],
"pipe_names": {
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1766,7 +1805,8 @@
},
"pipe_names": {
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1799,7 +1839,8 @@
},
"pipe_names": {
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1832,7 +1873,8 @@
},
"pipe_names": {
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1855,30 +1897,30 @@
}
}
},
- "info.unet.if-i-xl-v1": {
+ "info.unet.if-ii-l-v1": {
"*": {
- "repo": "DeepFloyd/IF-I-XL-v1.0",
+ "repo": "DeepFloyd/IF-II-L-v1.0",
"pkg": {
"0": {
- "diffusers": "IFPipeline"
+ "diffusers": "IFSuperResolutionPipeline"
}
},
- "tasks": [
- "IFImg2ImgPipeline",
- "IFInpaintingPipeline",
- "IFPipeline"
- ],
"pipe_names": {
"tokenizer": [
"info.encoder.tokenizer",
- "if-i-xl-v1"
+ "if-ii-l-v1"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"scheduler": [
"ops.scheduler.ddpm",
"scheduler"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.ddpm",
+ "scheduler"
]
}
}
@@ -1898,7 +1940,10 @@
],
"text_encoder": [
"Qwen2VLForConditionalGeneration",
- "BertModel"
+ [
+ "info.art.bert-uncased",
+ "*"
+ ]
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1939,7 +1984,10 @@
],
"text_encoder": [
"Qwen2VLForConditionalGeneration",
- "BertModel"
+ [
+ "info.art.bert-uncased",
+ "*"
+ ]
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -1963,6 +2011,15 @@
"diffusers": "HiDreamImagePipeline"
}
},
+ "file_256": [
+ "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
+ ],
+ "layer_b3": [
+ "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
+ ],
+ "layer_256": [
+ "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
+ ],
"pipe_names": {
"scheduler": [
"ops.scheduler.euler",
@@ -1972,7 +2029,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -1980,7 +2037,7 @@
"hidream-i1"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -1988,21 +2045,24 @@
"hidream-i1"
],
"text_encoder_3": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_3": [
"info.encoder.tokenizer",
"hidream-i1"
],
"text_encoder_4": [
- "LlamaForCausalLM"
+ "info.stst.llama-2-hf",
+ "*"
],
"tokenizer_4": [
"info.encoder.tokenizer",
"hidream-i1"
],
"transformer": [
- "HiDreamImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2012,41 +2072,29 @@
"repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
"pkg": {
"0": {
- "diffusers": "HunyuanDiTPipeline"
+ "precision": "ops.precision.float.F16"
}
},
- "tasks": [
- "HunyuanDiTPipeline"
+ "identifiers": [
+ "extra_embedder",
+ "model.blocks",
+ "skip_norm.weight"
],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "BertModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuandit-v1"
- ],
- "transformer": [
- "HunyuanDiT2DModel"
- ],
- "scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuandit-v1"
- ]
- }
+ "file_256": [
+ "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
+ "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
+ "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
+ ],
+ "layer_b3": [
+ "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
+ "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
+ "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
+ ],
+ "layer_256": [
+ "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
+ "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
+ "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
+ ]
}
},
"info.dit.hunyuanvideo": {
@@ -2057,9 +2105,19 @@
"diffusers": "HunyuanVideoPipeline"
}
},
+ "file_256": [
+ "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
+ ],
+ "layer_b3": [
+ "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
+ ],
+ "layer_256": [
+ "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
+ ],
"pipe_names": {
"text_encoder": [
- "LlamaModel"
+ "info.stst.llama-2-hf",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2077,7 +2135,7 @@
"discrete"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -2097,7 +2155,8 @@
},
"pipe_names": {
"text_encoder": [
- "LlavaForConditionalGeneration"
+ "info.vit.llava",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2115,7 +2174,7 @@
"discrete"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -2123,7 +2182,8 @@
"hunyuanvideo-i2v"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2138,7 +2198,8 @@
},
"pipe_names": {
"text_encoder": [
- "Qwen2_5_VLTextModel"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2156,7 +2217,8 @@
"discrete"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
@@ -2178,7 +2240,8 @@
},
"pipe_names": {
"text_encoder": [
- "Qwen2_5_VLTextModel"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2196,7 +2259,8 @@
"discrete"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
@@ -2209,7 +2273,8 @@
"SiglipVisionModel"
],
"feature_extractor": [
- "SiglipImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2232,21 +2297,24 @@
"audioldm-s-v2"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"hunyuanimage-2"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
"hunyuanimage-2"
],
"transformer": [
- "HunyuanImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2269,14 +2337,16 @@
"audioldm-s-v2"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"hunyuanimage-2-refiner"
],
"transformer": [
- "HunyuanImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2310,11 +2380,11 @@
"PriorTransformer"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -2326,7 +2396,8 @@
"scheduler"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2336,35 +2407,19 @@
"repo": "kandinsky-community/kandinsky-2-2-prior",
"pkg": {
"0": {
- "diffusers": "KandinskyPriorPipeline"
+ "diffusers": "KandinskyV22PriorPipeline"
}
},
- "tasks": [
- "Kandinsky3Img2ImgPipeline",
- "Kandinsky3Pipeline",
- "KandinskyCombinedPipeline",
- "KandinskyImg2ImgCombinedPipeline",
- "KandinskyImg2ImgPipeline",
- "KandinskyInpaintCombinedPipeline",
- "KandinskyInpaintPipeline",
- "KandinskyPipeline",
- "KandinskyV22CombinedPipeline",
- "KandinskyV22Img2ImgCombinedPipeline",
- "KandinskyV22Img2ImgPipeline",
- "KandinskyV22InpaintCombinedPipeline",
- "KandinskyV22InpaintPipeline",
- "KandinskyV22Pipeline"
- ],
"pipe_names": {
"prior": [
"PriorTransformer"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -2376,7 +2431,8 @@
"scheduler"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2395,7 +2451,8 @@
"latte-1"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"vae": [
"AutoencoderKL"
@@ -2428,7 +2485,8 @@
"ltx-video"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2458,7 +2516,8 @@
"ltx-video"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2475,32 +2534,24 @@
"repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
"pkg": {
"0": {
- "diffusers": "LuminaPipeline"
+                    "precision": "ops.precision.bfloat.B16"
}
},
- "tasks": [
- "Lumina2Pipeline",
- "LuminaPipeline"
+ "identifiers": [
+ "time_caption",
+ "feed_forward"
],
- "pipe_names": {
- "transformer": [
- "LuminaNextDiT2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "GemmaPreTrainedModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "lumina-next-sft"
- ]
- }
+ "file_256": [
+ "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
+ ],
+ "layer_b3": [
+ "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
+ "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
+ ],
+ "layer_256": [
+ "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
+ "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
+ ]
}
},
"info.dit.lumina-image-2": {
@@ -2511,6 +2562,18 @@
"diffusers": "Lumina2Pipeline"
}
},
+ "file_256": [
+ "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
+ "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
+ ],
+ "layer_b3": [
+ "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
+ ],
+ "layer_256": [
+ "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
+ ],
"tasks": [
"Lumina2Pipeline"
],
@@ -2526,7 +2589,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "Gemma2PreTrainedModel"
+ "info.stst.gemma2",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2550,6 +2614,34 @@
]
}
},
+ "info.dit.lucy-edit-dev": {
+ "*": {
+ "repo": "decart-ai/Lucy-Edit-Dev",
+ "pkg": {
+ "0": {
+ "diffusers": "LucyEditPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "lucy-edit-dev"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
"info.dit.longcat-image": {
"*": {
"repo": "meituan-longcat/LongCat-Image",
@@ -2567,7 +2659,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2577,7 +2670,8 @@
"Qwen2VLProcessor"
],
"transformer": [
- "LongCatImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2599,7 +2693,8 @@
"AutoencoderKL"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2609,7 +2704,8 @@
"Qwen2VLProcessor"
],
"transformer": [
- "LongCatImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2632,7 +2728,8 @@
"mochi-1"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -2649,29 +2746,21 @@
"repo": "ucsd-reach/musicldm",
"pkg": {
"0": {
- "diffusers": "MusicLDMPipeline"
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
}
},
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ClapTextModelWithProjection",
- "ClapModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "musicldm"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "SpeechT5HifiGan"
- ]
- }
+ "file_256": [
+ "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ]
}
},
"info.dit.omnigen-v1": {
@@ -2720,14 +2809,16 @@
"AutoencoderKL"
],
"text_encoder": [
- "Qwen3Model"
+ "info.stst.qwen3",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"ovis-image"
],
"transformer": [
- "OvisImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -2749,7 +2840,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -2757,7 +2848,8 @@
"visualclozepipeline-384"
],
"text_encoder_2": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"tokenizer_2": [
"info.encoder.tokenizer",
@@ -2769,14 +2861,39 @@
}
}
},
- "info.dit.pixart-xl-2-1024-ms": {
+ "info.lora.pia-condition-adapter": {
"*": {
- "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
+ "repo": "openmmlab/PIA-condition-adapter",
+ "pkg": {
+ "0": {
+ "diffusers": "PIAPipeline"
+ }
+ }
+ }
+ },
+ "info.dit.pixart-xl-2-1024-ms": {
+ "*": {
+ "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
"pkg": {
"0": {
"diffusers": "PixArtAlphaPipeline"
}
},
+ "identifiers": [
+ "aspect_ratio",
+ "y_embedding",
+ "emb.resolution",
+ "caption_projection"
+ ],
+ "file_256": [
+ "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
+ ],
+ "layer_b3": [
+ "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
+ ],
+ "layer_256": [
+ "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
+ ],
"tasks": [
"PixArtAlphaPipeline"
],
@@ -2786,7 +2903,8 @@
"pixart-xl-2-1024-ms"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"vae": [
"AutoencoderKL"
@@ -2809,6 +2927,22 @@
"diffusers": "PixArtSigmaPipeline"
}
},
+ "identifiers": [
+ "adaln_single",
+ "scale_shift_table"
+ ],
+ "file_256": [
+ "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
+ "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
+ ],
+ "layer_b3": [
+ "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
+ "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
+ ],
+ "layer_256": [
+ "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
+ "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
+ ],
"tasks": [
"PixArtAlphaPipeline",
"PixArtSigmaPipeline"
@@ -2819,7 +2953,8 @@
"pixart-sigma-xl-2-1024-ms"
],
"text_encoder": [
- "T5EncoderModel"
+ "info.stst.t5",
+ "*"
],
"vae": [
"AutoencoderKL"
@@ -2839,33 +2974,24 @@
"repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
"pkg": {
"0": {
- "diffusers": "SanaPipeline"
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 4.5,
+ "num_inference_steps": 20
+ },
+ "precision": "ops.precision.bfloat.B16"
}
},
- "tasks": [
- "SanaPAGPipeline",
- "SanaPipeline"
+ "file_256": [
+ "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-1024px-bf16"
- ],
- "text_encoder": [
- "Gemma2PreTrainedModel"
- ],
- "vae": [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "transformer": [
- "SanaTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
+ "layer_b3": [
+ "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
+ ],
+ "layer_256": [
+ "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
+ ]
}
},
"info.controlnet.sana-1024px-controlnet": {
@@ -2896,7 +3022,8 @@
"sana-sprint-1024px"
],
"text_encoder": [
- "Gemma2PreTrainedModel"
+ "info.stst.gemma2",
+ "*"
],
"vae": [
"info.vae.dc",
@@ -2926,7 +3053,8 @@
"sana-video"
],
"text_encoder": [
- "Gemma2PreTrainedModel"
+ "info.stst.gemma2",
+ "*"
],
"vae": [
[
@@ -2953,28 +3081,13 @@
"repo": "openai/shap-e",
"pkg": {
"0": {
- "diffusers": "ShapEPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 64,
+ "size": 256,
+ "guidance_scale": 15
+ }
}
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "shap-e"
- ],
- "scheduler": [
- "ops.scheduler.heun",
- "discrete"
- ],
- "shap_e_renderer": [
- "ShapERenderer"
- ]
}
}
},
@@ -2983,31 +3096,13 @@
"repo": "stabilityai/stable-audio-open-1.0",
"pkg": {
"0": {
- "diffusers": "StableAudioPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_end_in_s": 10,
+ "num_waveforms_per_prompt": 3
+ }
}
- },
- "pipe_names": {
- "vae": [
- "info.vae.oobleck",
- "stable-audio-open-1"
- ],
- "text_encoder": [
- "T5EncoderModel"
- ],
- "projection_model": [
- "StableAudioProjectionModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-audio-open-1"
- ],
- "transformer": [
- "StableAudioDiTModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
}
}
},
@@ -3016,60 +3111,85 @@
"repo": "stabilityai/stable-cascade-prior",
"pkg": {
"0": {
- "diffusers": "StableCascadePriorPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "negative_prompt": "",
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 20,
+ "guidance_scale": 4.0,
+ "width": 1024,
+ "height": 1024
+ }
}
},
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-cascade"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "prior": [
- "StableCascadeUNet"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ]
- }
- }
- },
- "info.unet.flux1-dev": {
+ "file_256": [
+ "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
+ "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
+ "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
+ "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
+ "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
+ "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
+ ],
+ "layer_b3": [
+ "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
+ "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
+ "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
+ "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
+ "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
+ "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
+ ],
+ "layer_256": [
+ "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
+ "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
+ "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
+ "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
+ "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
+ "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
+ ],
+ "identifiers": [
+ "down_blocks.0.2.kv_mapper",
+ "previewer",
+ "backbone"
+ ]
+ },
"decoder": {
- "repo": "black-forest-labs/FLUX.1-dev",
"pkg": {
"0": {
- "diffusers": "WuerstchenDecoderPipeline"
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 0.0,
+ "output_type": "pil",
+ "num_inference_steps": 10
+ },
+ "precision": "ops.precision.bfloat.B16"
}
},
- "tasks": [
- "WuerstchenCombinedPipeline",
- "WuerstchenDecoderPipeline"
+ "file_256": [
+ "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
+ "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
+ "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
+ "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
+ "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-dev"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "decoder": [
- "WuerstchenDiffNeXt"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ],
- "vqgan": [
- "PaellaVQModel"
- ]
- }
+ "layer_b3": [
+ "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
+ "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
+ "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
+ "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
+ "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
+ ],
+ "layer_256": [
+ "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
+ "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
+ "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
+ "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
+ "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
+ ],
+ "identifiers": [
+ "0.2.channelwise",
+ "clip_mapper.bias",
+ ".12.self_attn.k_proj.weight"
+ ]
}
},
"info.dit.auraflow": {
@@ -3080,6 +3200,32 @@
"diffusers": "AuraFlowPipeline"
}
},
+ "identifiers": [
+ [
+ 8192,
+ 3072
+ ],
+ "mlpX.c_fc2.weight",
+ "joint_transformer_blocks.2.ff_context.linear_2.weight"
+ ],
+ "file_256": [
+ "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
+ "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
+ "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
+ "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
+ ],
+ "layer_b3": [
+ "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
+ "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
+ "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
+ "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
+ ],
+ "layer_256": [
+ "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
+ "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
+ "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
+ "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
+ ],
"tasks": [
"AuraFlowPipeline"
],
@@ -3089,7 +3235,8 @@
"auraflow"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"vae": [
"AutoencoderKL"
@@ -3109,60 +3256,27 @@
"repo": "stabilityai/stable-diffusion-3.5-medium",
"pkg": {
"0": {
- "diffusers": "StableDiffusion3Pipeline"
+ "precision": "ops.precision.float.F16"
}
},
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline"
+ "identifiers": [
+ "model.diffusion_model.joint_blocks.",
+ "transformer_blocks.21.norm1_context.linear.weight",
+ "transformer_blocks.31.norm1_context.linear.weight",
+ "blocks.11.ff.net.2.weight"
],
- "pipe_names": {
- "transformer": [
- "SD3Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.tae",
- "stable-diffusion-3"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "text_encoder_3": [
- "T5EncoderModel"
- ],
- "tokenizer_3": [
- "info.encoder.tokenizer",
- "stable-diffusion-3"
- ],
- "image_encoder": [
- "SiglipVisionModel"
- ],
- "feature_extractor": [
- "SiglipImageProcessor"
- ]
- }
+ "file_256": [
+ "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
+ "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
+ ],
+ "layer_b3": [
+ "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
+ "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
+ ],
+ "layer_256": [
+ "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
+ "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
+ ]
},
"stable-diffusion-3-turbo": {
"repo": "tensorart/stable-diffusion-3.5-medium-turbo",
@@ -3231,7 +3345,7 @@
"audioldm-s-v2"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"unet": [
@@ -3242,7 +3356,8 @@
"discrete"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -3294,7 +3409,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -3309,7 +3424,8 @@
"StableDiffusionSafetyChecker"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -3327,7 +3443,7 @@
"AutoencoderKL"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer": [
@@ -3335,11 +3451,12 @@
"i2vgen-xl"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"feature_extractor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"unet": [
"I2VGenXLUNet"
@@ -3369,7 +3486,7 @@
"wuerstchen"
],
"text_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"prior": [
@@ -3387,31 +3504,43 @@
"repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
"pkg": {
"0": {
- "diffusers": "WanPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
}
},
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
+ "file_256": [
+ "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
+ "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
+ "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
+ "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
+ "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
+ "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
+ "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
+ "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan2-t2v"
- ],
- "text_encoder": [
- "UMT5EncoderModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
+ "layer_b3": [
+ "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
+ "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
+ "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
+ "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
+ "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
+ "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
+ "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
+ ],
+ "layer_256": [
+ "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
+ "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
+ "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
+ "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
+ "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
+ "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
+ "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
+ ]
}
},
"info.dit.wan-animate": {
@@ -3433,7 +3562,8 @@
"wan-animate"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"vae": [
"info.vae.kl",
@@ -3444,10 +3574,11 @@
"multistep"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"transformer": [
@@ -3464,6 +3595,31 @@
"diffusers": "WanImageToVideoPipeline"
}
},
+ "file_256": [
+ "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
+ "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
+ "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
+ "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
+ "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
+ "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
+ "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
+ "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
+ "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
+ ],
+ "layer_b3": [
+ "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
+ "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
+ "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
+ "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
+ "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
+ ],
+ "layer_256": [
+ "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
+ "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
+ "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
+ "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
+ "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
+ ],
"tasks": [
"WanImageToVideoPipeline",
"WanPipeline",
@@ -3475,7 +3631,8 @@
"wan2-i2v-480p"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"vae": [
"info.vae.wan",
@@ -3486,10 +3643,11 @@
"discrete"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"transformer": [
@@ -3509,6 +3667,25 @@
"diffusers": "WanVACEPipeline"
}
},
+ "file_256": [
+ "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
+ "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
+ "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
+ "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
+ "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
+ ],
+ "layer_b3": [
+ "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
+ "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
+ "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
+ "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
+ ],
+ "layer_256": [
+ "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
+ "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
+ "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
+ "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
+ ],
"tasks": [
"WanImageToVideoPipeline",
"WanPipeline",
@@ -3520,7 +3697,8 @@
"wan21-vace"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"vae": [
"info.vae.kl",
@@ -3544,7 +3722,14 @@
"repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
"pkg": {
"0": {
- "diffusers": "WanVideoToVideoPipeline"
+ "diffusers": "WanPipeline",
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
}
},
"tasks": [
@@ -3558,10 +3743,8 @@
"wan21-t2v"
],
"text_encoder": [
- "UMT5EncoderModel"
- ],
- "transformer": [
- "WanTransformer3DModel"
+ "info.stst.mt5",
+ "*"
],
"vae": [
"info.vae.kl",
@@ -3591,14 +3774,15 @@
"hunyuanvideo-i2v"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"kandinsky-5-t2v-lite-sft-5s"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -3628,14 +3812,15 @@
"AutoencoderKL"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"kandinsky-5-i2i-lite-sft"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -3666,14 +3851,15 @@
"hunyuanvideo-i2v"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"kandinsky-5-i2v-sft-5s"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -3703,14 +3889,15 @@
"AutoencoderKL"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"kandinsky-5-t2i-lite-sft"
],
"text_encoder_2": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"tokenizer_2": [
@@ -3755,13 +3942,16 @@
"z-image-turbo"
],
"transformer": [
- "ZImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
],
"siglip": [
- "Siglip2VisionModel"
+ "info.vit.siglip2-patch16-224",
+ "*"
],
"siglip_processor": [
- "Siglip2ImageProcessorFast"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -3790,7 +3980,8 @@
"skyreels-v2-t2v-720p"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"transformer": [
"SkyReelsV2Transformer3DModel"
@@ -3820,7 +4011,8 @@
"skyreels-v2-df-720p"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"transformer": [
"SkyReelsV2Transformer3DModel"
@@ -3850,10 +4042,11 @@
"skyreels-v2-i2v-720p"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"image_processor": [
@@ -3881,6 +4074,15 @@
"diffusers": "QwenImageInpaintPipeline"
}
},
+ "file_256": [
+ "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
+ ],
+ "layer_b3": [
+ "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
+ ],
+ "layer_256": [
+ "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
+ ],
"tasks": [
"QwenImageControlNetPipeline",
"QwenImageEditInpaintPipeline",
@@ -3900,14 +4102,16 @@
"qwen-image"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
"qwen-image"
],
"transformer": [
- "QwenImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -3959,7 +4163,8 @@
"qwen-image"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -3969,7 +4174,8 @@
"Qwen2VLProcessor"
],
"transformer": [
- "QwenImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -4001,7 +4207,8 @@
"qwen-image"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -4011,7 +4218,8 @@
"Qwen2VLProcessor"
],
"transformer": [
- "QwenImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -4043,7 +4251,8 @@
"qwen-image"
],
"text_encoder": [
- "Qwen2_5_VLForConditionalGeneration"
+ "info.vit.qwen2-vl",
+ "*"
],
"tokenizer": [
"info.encoder.tokenizer",
@@ -4053,7 +4262,8 @@
"Qwen2VLProcessor"
],
"transformer": [
- "QwenImageTransformer2DModel"
+ "info.dit.flux1-schnell",
+ "*"
]
}
}
@@ -4072,14 +4282,16 @@
"chronoedit"
],
"text_encoder": [
- "UMT5EncoderModel"
+ "info.stst.mt5",
+ "*"
],
"image_encoder": [
- "info.vit.clip-vit-patch14",
+ "info.vit.clip-vit-patch32",
"*"
],
"image_processor": [
- "CLIPImageProcessor"
+ "info.dit.flux1-schnell",
+ "*"
],
"transformer": [
"ChronoEditTransformer3DModel"
@@ -4100,1078 +4312,7799 @@
"repo": "Kwai-Kolors/Kolors-diffusers",
"pkg": {
"0": {
- "diffusers": "KolorsPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 5.0,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
}
},
- "tasks": [
- "KolorsImg2ImgPipeline",
- "KolorsPAGPipeline",
- "KolorsPipeline"
+ "file_256": [
+ "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "ChatGLMModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kolors"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch14",
- "*"
- ],
- "feature_extractor": [
- "CLIPImageProcessor"
- ]
- }
+ "layer_b3": [
+ "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
+ ],
+ "layer_256": [
+ "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
+ ],
+ "identifiers": [
+ ".DenseReluDense.wi.weight",
+ "encoder_hid_proj.weight"
+ ],
+ "pipe_names": {}
+ }
+ },
+ "info.moe.trinity": {
+ "*": {
+ "repo": "arcee-ai/Trinity-Mini",
+ "pkg": {
+ "0": {
+ "transformers": "AfmoeModel"
+ }
+ },
+ "tasks": [
+ "AfmoeForCausalLM",
+ "AfmoeModel",
+ "AfmoePreTrainedModel"
+ ]
}
},
"info.encoder.tokenizer": {
- "omdet-turbo-swin-hf": {
+ "aimv2-patch14-224-lit": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
}
}
},
- "blip2-opt": {
+ "albert-xx-v2": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
}
}
},
- "deberta-v2-x": {
+ "align": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "bert-for-seq-generation-l-24-bbc-encoder": {
+ "afm": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "20": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "21": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "22": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "wav2vec2-bert-rel-pos": {
+ "aria": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "nllb-moe": {
+ "audio-flamingo-3-hf": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
}
}
},
- "efficient-mlm-m0-0": {
+ "aya-vision": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
}
}
},
- "xlm-roberta": {
+ "bark": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "mgp-str": {
+ "bart": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
}
}
},
- "blenderbot": {
+ "bert-uncased": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "20": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "21": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "22": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "23": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "xlm-roberta-xl": {
+ "bert-for-seq-generation-l-24-bbc-encoder": {
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "transformers.models.bert_generation.tokenization_bert_generation.BertGenerationTokenizer"
+ }
+ }
+ },
+ "bigbird-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.big_bird.tokenization_big_bird.BigBirdTokenizer"
+ }
+ }
+ },
+ "bigbird-pegasus-arxiv": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "biogpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.biogpt.tokenization_biogpt.BioGptTokenizer"
+ }
+ }
+ },
+ "bitnet-b18-4t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "blenderbot": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer"
+ }
+ }
+ },
+ "blip-vqa": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "blip2-opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "bloom": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "blt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "bridgetower": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "bros-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "camembert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.camembert.tokenization_camembert.CamembertTokenizer"
+ }
+ }
+ },
+ "canine-s": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.canine.tokenization_canine.CanineTokenizer"
+ }
+ }
+ },
+ "chameleon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "chinese-clip-vit-patch16": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "clap-htsat-fused": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "clip-vit-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "clipseg-rd64": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "clvp-dev": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clvp.tokenization_clvp.ClvpTokenizer"
+ }
+ }
+ },
+ "llama-2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "codegen-mono": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "c4ai-command-r-v01": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
+ }
+ }
+ },
+ "conv-bert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "cpm-ant": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.cpmant.tokenization_cpmant.CpmAntTokenizer"
+ }
+ }
+ },
+ "csm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "ctrl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer"
+ }
+ }
+ },
+ "data2vec-audio-960h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "data2vec-text": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "dbrx": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "deberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.deberta.tokenization_deberta.DebertaTokenizer"
+ }
+ }
+ },
+ "deberta-v2-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2.DebertaV2Tokenizer"
+ }
+ }
+ },
+ "deepseek-v2-lite": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "deepseek-v3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "deepseek-vl-chat": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "dia": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.dia.tokenization_dia.DiaTokenizer"
+ }
+ }
+ },
+ "diffllama-handcut": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "distilbert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "dpr-ctx-encoder-single-nq": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
+ }
+ }
+ },
+ "electra-discriminator": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "emu3-chat-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "ernie-3-zh": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "ernie-45-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "ernie-4-a-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "ernie-4-vl-a-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "esm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
+ }
+ }
+ },
+ "exaone-4": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "falcon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "falcon-mamba": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "flaubert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
+ }
+ }
+ },
+ "flava": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "flexolmo-7x-1t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "florence-2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "fnet": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
+ }
+ }
+ },
+ "wmt19-en-ru": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
+ }
+ }
+ },
+ "funnel": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.funnel.tokenization_funnel.FunnelTokenizer"
+ }
+ }
+ },
+ "fuyu": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "gemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma-3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma3-text": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma-3n-e": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "git": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "glm-4-chat": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4-0414": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4v-thinking": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4v": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-asr-nano-2512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "got-ocr-2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "gpt2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-bigcode-santacoder": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-neo": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "gpt-neox": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "gpt-neox-japanese": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer"
+ }
+ }
+ },
+ "gpt-oss": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "gpt-j": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "granite": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "powermoe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "granite-4-h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "moe-active-shared-experts": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "grounding-dino": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "groupvit-gcc-yfcc": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "helium": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "hubert-ls960": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "ibert-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "idefics": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "idefics2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "idefics3-llama3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "blip-flan-t5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "internvl3-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "jais-2-chat": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "jamba-v0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "janus": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "jetmoe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "kosmos-2-patch14-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "kosmos-2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "todo": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.parakeet.tokenization_parakeet_fast.ParakeetTokenizerFast"
+ }
+ }
+ },
+ "layoutlm-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "layoutlmv2-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer"
+ }
+ }
+ },
+ "layoutlmv3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
+ }
+ }
+ },
+ "led-16384": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "lfm2-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "lilt-roberta-en": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "llama-4-scout-16e": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-v1-mistral-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-next-video-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llava-onevision-qwen2-ov-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "longformer-4096": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "long-t5-local": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "luke": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.luke.tokenization_luke.LukeTokenizer"
+ }
+ }
+ },
+ "lxmert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "m": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.m2m_100.tokenization_m2m_100.M2M100Tokenizer"
+ }
+ }
+ },
+ "mamba": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "mamba2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "opus-mt-en-de": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.marian.tokenization_marian.MarianTokenizer"
+ }
+ }
+ },
+ "mbart-cc25": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mbart.tokenization_mbart.MBartTokenizer"
+ }
+ }
+ },
+ "megatron-bert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "metaclip-2-worldwide-huge-quickgelu": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "mgp-str": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mgp_str.tokenization_mgp_str.MgpstrTokenizer"
+ }
+ }
+ },
+ "max-text-01-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "stral-3-2512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mistral-v0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mistral-3-2503": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mixtral-8x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "llama-3-vision": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mm-grounding-dino-o365v1-goldg-v3det": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "mobilebert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "modernbert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "moonshine": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "hf-moshiko": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "mpnet": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer"
+ }
+ }
+ },
+ "mpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "mra-512-4": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "mt5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "musicgen": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "musicgen-melody": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "mvp": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "nemotron-3-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "nllb-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.nllb.tokenization_nllb.NllbTokenizer"
+ }
+ }
+ },
+ "nystromformer-512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
+ }
+ }
+ },
+ "olmo-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "olmo2-1124-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "olmo-3-0725": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "olmoe-0924": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "omdet-turbo-swin-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "oneformer-ade-swin": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "openai-gpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
+ }
+ }
+ },
+ "opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "ovis2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "owlv2-patch16": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "owlvit-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "paligemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "pegasus": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "pegasus-x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
+ }
+ }
+ },
+ "language-perceiver": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.perceiver.tokenization_perceiver.PerceiverTokenizer"
+ }
+ }
+ },
+ "persimmon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "phi-1": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "phi-3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "phi-3-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "pixtral": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "plbart": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.plbart.tokenization_plbart.PLBartTokenizer"
+ }
+ }
+ },
+ "phetnet-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.prophetnet.tokenization_prophetnet.ProphetNetTokenizer"
+ }
+ }
+ },
+ "qwen2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen2-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen15-moe-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-next-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-vl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-vl-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "recurrentgemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "reformer-crime-and-punishment": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.reformer.tokenization_reformer.ReformerTokenizer"
+ }
+ }
+ },
+ "rembert": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.rembert.tokenization_rembert.RemBertTokenizer"
+ }
+ }
+ },
+ "roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "efficient-mlm-m0-0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "roc-bert-zh": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roc_bert.tokenization_roc_bert.RoCBertTokenizer"
+ }
+ }
+ },
+ "roformer-chinese": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roformer.tokenization_roformer.RoFormerTokenizer"
+ }
+ }
+ },
+ "rwkv-4-pile": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "hf-seamless-m4t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
+ }
+ }
+ },
+ "seamless-m4t-v2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
+ }
+ }
+ },
+ "siglip-patch16-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.siglip.tokenization_siglip.SiglipTokenizer"
+ }
+ }
+ },
+ "siglip2-patch16-224": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "smollm3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "s2t-librispeech-asr": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer"
+ }
+ }
+ },
+ "speecht5-asr": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.speecht5.tokenization_speecht5.SpeechT5Tokenizer"
+ }
+ }
+ },
+ "splinter": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.splinter.tokenization_splinter.SplinterTokenizer"
+ }
+ }
+ },
+ "squeezebert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "stablelm-4e1t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "starcoder2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "switch-8": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "t5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "t5gemma-prefixlm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "tapas-finetuned-sqa": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.tapas.tokenization_tapas.TapasTokenizer"
+ }
+ }
+ },
+ "tvp": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "udop": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.udop.tokenization_udop.UdopTokenizer"
+ }
+ }
+ },
+ "umt5": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
+ }
+ }
+ },
+ "video-llava-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "vilt-b32-mlm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "vip-llava-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "visualbert-vqa-coco-pre": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "mms-tts-eng": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.vits.tokenization_vits.VitsTokenizer"
+ }
+ }
+ },
+ "voxtral-2507": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "wav2vec2-960h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "wav2vec2-bert-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "wav2vec2-conformer-rel-pos": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
+ }
+ }
+ },
+ "whisper": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.whisper.tokenization_whisper.WhisperTokenizer"
+ }
+ }
+ },
+ "xclip-patch32": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "xglm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xglm.tokenization_xglm.XGLMTokenizer"
+ }
+ }
+ },
+ "xlm-mlm-en-2048": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm.tokenization_xlm.XLMTokenizer"
+ }
+ }
+ },
+ "xlm-roberta": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "xlm-roberta-xl": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "xlnet-cased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer"
+ }
+ }
+ },
+ "xlstm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "xmod": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
+ }
+ }
+ },
+ "yoso-4096": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
+ }
+ }
+ },
+ "zamba-v1": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "zamba2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ }
+ },
+ "info.vit.aimv2-patch14-224-lit": {
+ "*": {
+ "repo": "apple/aimv2-large-patch14-224-lit",
+ "pkg": {
+ "0": {
+ "transformers": "Aimv2Model"
+ }
+ },
+ "tasks": [
+ "Aimv2VisionModel",
+ "Aimv2Model",
+ "Aimv2PreTrainedModel",
+ "Aimv2TextModel"
+ ]
+ }
+ },
+ "info.vit.aimv2-patch14-224": {
+ "*": {
+ "repo": "apple/aimv2-large-patch14-224",
+ "pkg": {
+ "0": {
+ "transformers": "Aimv2VisionModel"
+ }
+ },
+ "tasks": [
+ "Aimv2VisionModel",
+ "Aimv2Model",
+ "Aimv2PreTrainedModel",
+ "Aimv2TextModel"
+ ]
+ }
+ },
+ "info.art.albert-xx-v2": {
+ "*": {
+ "repo": "albert/albert-xxlarge-v2",
+ "pkg": {
+ "0": {
+ "transformers": "AlbertModel"
+ }
+ },
+ "tasks": [
+ "AlbertPreTrainedModel",
+ "AlbertModel",
+ "AlbertForPreTraining",
+ "AlbertForMaskedLM",
+ "AlbertForSequenceClassification",
+ "AlbertForTokenClassification",
+ "AlbertForQuestionAnswering",
+ "AlbertForMultipleChoice"
+ ]
+ }
+ },
+ "info.vit.align": {
+ "*": {
+ "repo": "kakaobrain/align-base",
+ "pkg": {
+ "0": {
+ "transformers": "AlignModel"
+ }
+ },
+ "tasks": [
+ "AlignPreTrainedModel",
+ "AlignTextModel",
+ "AlignVisionModel",
+ "AlignModel"
+ ]
+ }
+ },
+ "info.vit.altclip": {
+ "*": {
+ "repo": "BAAI/AltCLIP",
+ "pkg": {
+ "0": {
+ "transformers": "AltCLIPModel"
+ }
+ },
+ "tasks": [
+ "AltCLIPPreTrainedModel",
+ "AltCLIPVisionModel",
+ "AltCLIPTextModel",
+ "AltCLIPModel"
+ ]
+ }
+ },
+ "info.stst.apertus": {
+ "*": {
+ "repo": "swiss-ai/Apertus-8B",
+ "pkg": {
+ "0": {
+ "transformers": "ApertusModel"
+ }
+ },
+ "tasks": [
+ "ApertusModel",
+ "ApertusForCausalLM",
+ "ApertusForTokenClassification",
+ "ApertusPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.afm": {
+ "*": {
+ "repo": "arcee-ai/AFM-4.5B",
+ "pkg": {
+ "0": {
+ "transformers": "ArceeModel"
+ }
+ },
+ "tasks": [
+ "ArceeForCausalLM",
+ "ArceeForQuestionAnswering",
+ "ArceeForSequenceClassification",
+ "ArceeForTokenClassification",
+ "ArceeModel",
+ "ArceePreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.aria": {
+ "*": {
+ "repo": "rhymes-ai/Aria",
+ "pkg": {
+ "0": {
+ "transformers": "AriaModel"
+ }
+ },
+ "tasks": [
+ "AriaForConditionalGeneration",
+ "AriaPreTrainedModel",
+ "AriaTextPreTrainedModel",
+ "AriaTextModel",
+ "AriaModel",
+ "AriaTextForCausalLM"
+ ]
+ }
+ },
+ "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "*": {
+ "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "pkg": {
+ "0": {
+ "transformers": "ASTModel"
+ }
+ },
+ "tasks": [
+ "ASTForAudioClassification",
+ "ASTModel",
+ "ASTPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.audio-flamingo-3-hf": {
+ "*": {
+ "repo": "nvidia/audio-flamingo-3-hf",
+ "pkg": {
+ "0": {
+ "transformers": "AudioFlamingo3ForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "AudioFlamingo3ForConditionalGeneration",
+ "AudioFlamingo3PreTrainedModel",
+ "AudioFlamingo3Encoder"
+ ]
+ }
+ },
+ "info.aet.audio-flamingo-3-hf": {
+ "*": {
+ "repo": "nvidia/audio-flamingo-3-hf",
+ "pkg": {
+ "0": {
+ "transformers": "AudioFlamingo3Encoder"
+ }
+ },
+ "tasks": [
+ "AudioFlamingo3ForConditionalGeneration",
+ "AudioFlamingo3PreTrainedModel",
+ "AudioFlamingo3Encoder"
+ ]
+ }
+ },
+ "info.stst.autoformer-tourism-monthly": {
+ "*": {
+ "repo": "huggingface/autoformer-tourism-monthly",
+ "pkg": {
+ "0": {
+ "transformers": "AutoformerModel"
+ }
+ },
+ "tasks": [
+ "AutoformerForPrediction",
+ "AutoformerModel",
+ "AutoformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.aya-vision": {
+ "*": {
+ "repo": "CohereForAI/aya-vision-8b",
+ "pkg": {
+ "0": {
+ "transformers": "AyaVisionModel"
+ }
+ },
+ "tasks": [
+ "AyaVisionForConditionalGeneration",
+ "AyaVisionPreTrainedModel",
+ "AyaVisionModel"
+ ]
+ }
+ },
+ "info.ssm.bamba-t-hf": {
+ "*": {
+ "repo": "ibm-fms/Bamba-9.8b-2.2T-hf",
+ "pkg": {
+ "0": {
+ "transformers": "BambaModel"
+ }
+ },
+ "tasks": [
+ "BambaModel",
+ "BambaForCausalLM",
+ "BambaPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.bark": {
+ "*": {
+ "repo": "suno/bark",
+ "pkg": {
+ "0": {
+ "transformers": "BarkModel"
+ }
+ },
+ "tasks": [
+ "BarkFineModel",
+ "BarkSemanticModel",
+ "BarkCoarseModel",
+ "BarkModel",
+ "BarkPreTrainedModel",
+ "BarkCausalModel"
+ ]
+ }
+ },
+ "info.stst.bart": {
+ "*": {
+ "repo": "facebook/bart-large",
+ "pkg": {
+ "0": {
+ "transformers": "BartModel"
+ }
+ },
+ "tasks": [
+ "BartForCausalLM",
+ "BartForConditionalGeneration",
+ "BartForQuestionAnswering",
+ "BartForSequenceClassification",
+ "BartModel",
+ "BartPreTrainedModel",
+ "BartPretrainedModel",
+ "PretrainedBartModel"
+ ]
+ }
+ },
+ "info.vit.beit-patch16-224-pt": {
+ "*": {
+ "repo": "microsoft/beit-base-patch16-224-pt22k",
+ "pkg": {
+ "0": {
+ "transformers": "BeitModel"
+ }
+ },
+ "tasks": [
+ "BeitForImageClassification",
+ "BeitForMaskedImageModeling",
+ "BeitForSemanticSegmentation",
+ "BeitModel",
+ "BeitPreTrainedModel",
+ "BeitBackbone"
+ ]
+ }
+ },
+ "info.art.bert-uncased": {
+ "*": {
+ "repo": "google-bert/bert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "BertModel"
+ }
+ },
+ "file_256": [
+ "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
+ ],
+ "layer_b3": [
+ "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
+ ],
+ "layer_256": [
+ "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
+ ],
+ "tasks": [
+ "BertForMaskedLM",
+ "BertForMultipleChoice",
+ "BertForNextSentencePrediction",
+ "BertForPreTraining",
+ "BertForQuestionAnswering",
+ "BertForSequenceClassification",
+ "BertForTokenClassification",
+ "BertLayer",
+ "BertLMHeadModel",
+ "BertModel",
+ "BertPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "*": {
+ "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "pkg": {
+ "0": {
+ "transformers": "BertGenerationEncoder"
+ }
+ },
+ "tasks": [
+ "BertGenerationDecoder",
+ "BertGenerationEncoder",
+ "BertGenerationPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.bigbird-roberta": {
+ "*": {
+ "repo": "google/bigbird-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "BigBirdModel"
+ }
+ },
+ "tasks": [
+ "BigBirdForCausalLM",
+ "BigBirdForMaskedLM",
+ "BigBirdForMultipleChoice",
+ "BigBirdForPreTraining",
+ "BigBirdForQuestionAnswering",
+ "BigBirdForSequenceClassification",
+ "BigBirdForTokenClassification",
+ "BigBirdLayer",
+ "BigBirdModel",
+ "BigBirdPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.bigbird-pegasus-arxiv": {
+ "*": {
+ "repo": "google/bigbird-pegasus-large-arxiv",
+ "pkg": {
+ "0": {
+ "transformers": "BigBirdPegasusModel"
+ }
+ },
+ "tasks": [
+ "BigBirdPegasusForCausalLM",
+ "BigBirdPegasusForConditionalGeneration",
+ "BigBirdPegasusForQuestionAnswering",
+ "BigBirdPegasusForSequenceClassification",
+ "BigBirdPegasusModel",
+ "BigBirdPegasusPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.biogpt": {
+ "*": {
+ "repo": "microsoft/biogpt",
+ "pkg": {
+ "0": {
+ "transformers": "BioGptModel"
+ }
+ },
+ "tasks": [
+ "BioGptForCausalLM",
+ "BioGptForTokenClassification",
+ "BioGptForSequenceClassification",
+ "BioGptModel",
+ "BioGptPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.bit-50": {
+ "*": {
+ "repo": "google/bit-50",
+ "pkg": {
+ "0": {
+ "transformers": "BitModel"
+ }
+ },
+ "tasks": [
+ "BitForImageClassification",
+ "BitModel",
+ "BitPreTrainedModel",
+ "BitBackbone"
+ ]
+ }
+ },
+ "info.stst.bitnet-b18-4t": {
+ "*": {
+ "repo": "microsoft/bitnet-b1.58-2B-4T",
+ "pkg": {
+ "0": {
+ "transformers": "BitNetModel"
+ }
+ },
+ "tasks": [
+ "BitNetForCausalLM",
+ "BitNetModel",
+ "BitNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.blenderbot": {
+ "*": {
+ "repo": "facebook/blenderbot-3B",
+ "pkg": {
+ "0": {
+ "transformers": "BlenderbotModel"
+ }
+ },
+ "tasks": [
+ "BlenderbotForCausalLM",
+ "BlenderbotForConditionalGeneration",
+ "BlenderbotModel",
+ "BlenderbotPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.blip-vqa": {
+ "*": {
+ "repo": "Salesforce/blip-vqa-base",
+ "pkg": {
+ "0": {
+ "transformers": "BlipModel"
+ }
+ },
+ "tasks": [
+ "BlipModel",
+ "BlipPreTrainedModel",
+ "BlipForConditionalGeneration",
+ "BlipForQuestionAnswering",
+ "BlipVisionModel",
+ "BlipTextModel",
+ "BlipForImageTextRetrieval"
+ ]
+ }
+ },
+ "info.vit.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2Model"
+ }
+ },
+ "tasks": [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection"
+ ]
+ }
+ },
+ "info.stst.blip2-opt": {
+ "*": {
+ "repo": "Salesforce/blip2-opt-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "Blip2QFormerModel"
+ }
+ },
+ "tasks": [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection"
+ ]
+ }
+ },
+ "info.art.bloom": {
+ "*": {
+ "repo": "bigscience/bloom",
+ "pkg": {
+ "0": {
+ "transformers": "BloomModel"
+ }
+ },
+ "tasks": [
+ "BloomForCausalLM",
+ "BloomModel",
+ "BloomPreTrainedModel",
+ "BloomForSequenceClassification",
+ "BloomForTokenClassification",
+ "BloomForQuestionAnswering"
+ ]
+ }
+ },
+ "info.vit.blt": {
+ "*": {
+ "repo": "facebook/blt",
+ "pkg": {
+ "0": {
+ "transformers": "BltModel"
+ }
+ },
+ "tasks": [
+ "BltPreTrainedModel",
+ "BltModel",
+ "BltPatcher",
+ "BltForCausalLM"
+ ]
+ }
+ },
+ "info.vit.bridgetower": {
+ "*": {
+ "repo": "BridgeTower/bridgetower-base",
+ "pkg": {
+ "0": {
+ "transformers": "BridgeTowerModel"
+ }
+ },
+ "tasks": [
+ "BridgeTowerForContrastiveLearning",
+ "BridgeTowerForImageAndTextRetrieval",
+ "BridgeTowerForMaskedLM",
+ "BridgeTowerModel",
+ "BridgeTowerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.bros-uncased": {
+ "*": {
+ "repo": "jinho8345/bros-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "BrosModel"
+ }
+ },
+ "tasks": [
+ "BrosPreTrainedModel",
+ "BrosModel",
+ "BrosForTokenClassification",
+ "BrosSpadeEEForTokenClassification",
+ "BrosSpadeELForTokenClassification"
+ ]
+ }
+ },
+ "info.art.camembert": {
+ "*": {
+ "repo": "almanach/camembert-base",
+ "pkg": {
+ "0": {
+ "transformers": "CamembertModel"
+ }
+ },
+ "tasks": [
+ "CamembertForCausalLM",
+ "CamembertForMaskedLM",
+ "CamembertForMultipleChoice",
+ "CamembertForQuestionAnswering",
+ "CamembertForSequenceClassification",
+ "CamembertForTokenClassification",
+ "CamembertModel",
+ "CamembertPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.canine-s": {
+ "*": {
+ "repo": "google/canine-s",
+ "pkg": {
+ "0": {
+ "transformers": "CanineModel"
+ }
+ },
+ "tasks": [
+ "CanineForMultipleChoice",
+ "CanineForQuestionAnswering",
+ "CanineForSequenceClassification",
+ "CanineForTokenClassification",
+ "CanineLayer",
+ "CanineModel",
+ "CaninePreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.chameleon": {
+ "*": {
+ "repo": "meta/chameleon-7B",
+ "pkg": {
+ "0": {
+ "transformers": "ChameleonModel"
+ }
+ },
+ "tasks": [
+ "ChameleonForConditionalGeneration",
+ "ChameleonModel",
+ "ChameleonPreTrainedModel",
+ "ChameleonVQVAE"
+ ]
+ }
+ },
+ "info.vit.chinese-clip-vit-patch16": {
+ "*": {
+ "repo": "OFA-Sys/chinese-clip-vit-base-patch16",
+ "pkg": {
+ "0": {
+ "transformers": "ChineseCLIPModel"
+ }
+ },
+ "tasks": [
+ "ChineseCLIPModel",
+ "ChineseCLIPPreTrainedModel",
+ "ChineseCLIPTextModel",
+ "ChineseCLIPVisionModel"
+ ]
+ }
+ },
+ "info.vit.clap-htsat-fused": {
+ "*": {
+ "repo": "laion/clap-htsat-fused",
+ "pkg": {
+ "0": {
+ "transformers": "ClapModel"
+ }
+ },
+ "file_256": [
+ "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
+ "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
+ ],
+ "layer_b3": [
+ "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
+ "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
+ ],
+ "layer_256": [
+ "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
+ "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
+ ],
+ "tasks": [
+ "ClapModel",
+ "ClapPreTrainedModel",
+ "ClapTextModel",
+ "ClapTextModelWithProjection",
+ "ClapAudioModel",
+ "ClapAudioModelWithProjection"
+ ]
+ }
+ },
+ "info.vit.clip-vit-patch32": {
+ "*": {
+ "repo": "openai/clip-vit-base-patch32",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPModel"
+ }
+ },
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
+ ]
+ }
+ },
+ "info.vit.clipseg-rd64": {
+ "*": {
+ "repo": "CIDAS/clipseg-rd64",
+ "pkg": {
+ "0": {
+ "transformers": "CLIPSegModel"
+ }
+ },
+ "tasks": [
+ "CLIPSegModel",
+ "CLIPSegPreTrainedModel",
+ "CLIPSegTextModel",
+ "CLIPSegVisionModel",
+ "CLIPSegForImageSegmentation"
+ ]
+ }
+ },
+ "info.vit.clvp-dev": {
+ "*": {
+ "repo": "susnato/clvp_dev",
+ "pkg": {
+ "0": {
+ "transformers": "ClvpModelForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "ClvpModelForConditionalGeneration",
+ "ClvpForCausalLM",
+ "ClvpModel",
+ "ClvpPreTrainedModel",
+ "ClvpEncoder",
+ "ClvpDecoder"
+ ]
+ }
+ },
+ "info.stst.llama-2-hf": {
+ "*": {
+ "repo": "meta-llama/Llama-2-7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlamaModel"
+ }
+ },
+ "tasks": [
+ "LlamaForCausalLM",
+ "LlamaModel",
+ "LlamaPreTrainedModel",
+ "LlamaForSequenceClassification",
+ "LlamaForQuestionAnswering",
+ "LlamaForTokenClassification"
+ ]
+ }
+ },
+ "info.art.codegen-mono": {
+ "*": {
+ "repo": "Salesforce/codegen-2B-mono",
+ "pkg": {
+ "0": {
+ "transformers": "CodeGenModel"
+ }
+ },
+ "tasks": [
+ "CodeGenForCausalLM",
+ "CodeGenModel",
+ "CodeGenPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.c4ai-command-r-v01": {
+ "*": {
+ "repo": "CohereForAI/c4ai-command-r-v01",
+ "pkg": {
+ "0": {
+ "transformers": "CohereModel"
+ }
+ },
+ "tasks": [
+ "CohereForCausalLM",
+ "CohereModel",
+ "CoherePreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.command-a-vision-07-2025": {
+ "*": {
+ "repo": "CohereLabs/command-a-vision-07-2025",
+ "pkg": {
+ "0": {
+ "transformers": "Cohere2VisionModel"
+ }
+ },
+ "tasks": [
+ "Cohere2VisionForConditionalGeneration",
+ "Cohere2VisionPreTrainedModel",
+ "Cohere2VisionModel"
+ ]
+ }
+ },
+ "info.detr.conditional-detr-resnet-50": {
+ "*": {
+ "repo": "microsoft/conditional-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "ConditionalDetrModel"
+ }
+ },
+ "tasks": [
+ "ConditionalDetrForObjectDetection",
+ "ConditionalDetrForSegmentation",
+ "ConditionalDetrModel",
+ "ConditionalDetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.conv-bert": {
+ "*": {
+ "repo": "YituTech/conv-bert-base",
+ "pkg": {
+ "0": {
+ "transformers": "ConvBertModel"
+ }
+ },
+ "tasks": [
+ "ConvBertForMaskedLM",
+ "ConvBertForMultipleChoice",
+ "ConvBertForQuestionAnswering",
+ "ConvBertForSequenceClassification",
+ "ConvBertForTokenClassification",
+ "ConvBertLayer",
+ "ConvBertModel",
+ "ConvBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.convnext-224": {
+ "*": {
+ "repo": "facebook/convnext-tiny-224",
+ "pkg": {
+ "0": {
+ "transformers": "ConvNextModel"
+ }
+ },
+ "tasks": [
+ "ConvNextForImageClassification",
+ "ConvNextModel",
+ "ConvNextPreTrainedModel",
+ "ConvNextBackbone"
+ ]
+ }
+ },
+ "info.vit.convnextv2-224": {
+ "*": {
+ "repo": "facebook/convnextv2-tiny-1k-224",
+ "pkg": {
+ "0": {
+ "transformers": "ConvNextV2Model"
+ }
+ },
+ "tasks": [
+ "ConvNextV2ForImageClassification",
+ "ConvNextV2Model",
+ "ConvNextV2PreTrainedModel",
+ "ConvNextV2Backbone"
+ ]
+ }
+ },
+ "info.stst.cpm-ant": {
+ "*": {
+ "repo": "openbmb/cpm-ant-10b",
+ "pkg": {
+ "0": {
+ "transformers": "CpmAntModel"
+ }
+ },
+ "tasks": [
+ "CpmAntForCausalLM",
+ "CpmAntModel",
+ "CpmAntPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.csm": {
+ "*": {
+ "repo": "sesame/csm-1b",
+ "pkg": {
+ "0": {
+ "transformers": "CsmForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "CsmPreTrainedModel",
+ "CsmBackboneModel",
+ "CsmDepthDecoderModel",
+ "CsmDepthDecoderForCausalLM",
+ "CsmForConditionalGeneration"
+ ]
+ }
+ },
+ "info.art.ctrl": {
+ "*": {
+ "repo": "Salesforce/ctrl",
+ "pkg": {
+ "0": {
+ "transformers": "CTRLModel"
+ }
+ },
+ "tasks": [
+ "CTRLForSequenceClassification",
+ "CTRLLMHeadModel",
+ "CTRLModel",
+ "CTRLPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.cvt-13": {
+ "*": {
+ "repo": "microsoft/cvt-13",
+ "pkg": {
+ "0": {
+ "transformers": "CvtModel"
+ }
+ },
+ "tasks": [
+ "CvtForImageClassification",
+ "CvtModel",
+ "CvtPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.cwm": {
+ "*": {
+ "repo": "facebook/cwm",
+ "pkg": {
+ "0": {
+ "transformers": "CwmModel"
+ }
+ },
+ "tasks": [
+ "CwmPreTrainedModel",
+ "CwmModel",
+ "CwmForCausalLM"
+ ]
+ }
+ },
+ "info.detr.dfine-x-coco": {
+ "*": {
+ "repo": "ustc-community/dfine-xlarge-coco",
+ "pkg": {
+ "0": {
+ "transformers": "DFineModel"
+ }
+ },
+ "tasks": [
+ "DFineModel",
+ "DFinePreTrainedModel",
+ "DFineForObjectDetection"
+ ]
+ }
+ },
+ "info.detr.dab-detr": {
+ "*": {
+ "repo": "IDEA-Research/dab-detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DabDetrModel"
+ }
+ },
+ "tasks": [
+ "DabDetrForObjectDetection",
+ "DabDetrModel",
+ "DabDetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.gan.dac": {
+ "*": {
+ "repo": "descript/dac_16khz",
+ "pkg": {
+ "0": {
+ "transformers": "DacModel"
+ }
+ },
+ "tasks": [
+ "DacModel",
+ "DacPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.data2vec-audio-960h": {
+ "*": {
+ "repo": "facebook/data2vec-audio-base-960h",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecAudioModel"
+ }
+ },
+ "tasks": [
+ "Data2VecAudioForAudioFrameClassification",
+ "Data2VecAudioForCTC",
+ "Data2VecAudioForSequenceClassification",
+ "Data2VecAudioForXVector",
+ "Data2VecAudioModel",
+ "Data2VecAudioPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.data2vec-text": {
+ "*": {
+ "repo": "facebook/data2vec-text-base",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecTextModel"
+ }
+ },
+ "tasks": [
+ "Data2VecTextForCausalLM",
+ "Data2VecTextForMaskedLM",
+ "Data2VecTextForMultipleChoice",
+ "Data2VecTextForQuestionAnswering",
+ "Data2VecTextForSequenceClassification",
+ "Data2VecTextForTokenClassification",
+ "Data2VecTextModel",
+ "Data2VecTextPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.data2vec-vision": {
+ "*": {
+ "repo": "facebook/data2vec-vision-base",
+ "pkg": {
+ "0": {
+ "transformers": "Data2VecVisionModel"
+ }
+ },
+ "tasks": [
+ "Data2VecVisionForImageClassification",
+ "Data2VecVisionForSemanticSegmentation",
+ "Data2VecVisionModel",
+ "Data2VecVisionPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.dbrx": {
+ "*": {
+ "repo": "databricks/dbrx-instruct",
+ "pkg": {
+ "0": {
+ "transformers": "DbrxModel"
+ }
+ },
+ "tasks": [
+ "DbrxForCausalLM",
+ "DbrxModel",
+ "DbrxPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.deberta": {
+ "*": {
+ "repo": "microsoft/deberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaModel"
+ }
+ },
+ "tasks": [
+ "DebertaForMaskedLM",
+ "DebertaForQuestionAnswering",
+ "DebertaForSequenceClassification",
+ "DebertaForTokenClassification",
+ "DebertaModel",
+ "DebertaPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.deberta-v2-x": {
+ "*": {
+ "repo": "microsoft/deberta-v2-xlarge",
+ "pkg": {
+ "0": {
+ "transformers": "DebertaV2Model"
+ }
+ },
+ "tasks": [
+ "DebertaV2ForMaskedLM",
+ "DebertaV2ForMultipleChoice",
+ "DebertaV2ForQuestionAnswering",
+ "DebertaV2ForSequenceClassification",
+ "DebertaV2ForTokenClassification",
+ "DebertaV2Model",
+ "DebertaV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.decision-transformer-gym-hopper": {
+ "*": {
+ "repo": "edbeeching/decision-transformer-gym-hopper-medium",
+ "pkg": {
+ "0": {
+ "transformers": "DecisionTransformerModel"
+ }
+ },
+ "tasks": [
+ "DecisionTransformerGPT2Model",
+ "DecisionTransformerGPT2PreTrainedModel",
+ "DecisionTransformerModel",
+ "DecisionTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.deepseek-v2-lite": {
+ "*": {
+ "repo": "deepseek-ai/DeepSeek-V2-Lite",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekV2Model"
+ }
+ },
+ "tasks": [
+ "DeepseekV2PreTrainedModel",
+ "DeepseekV2Model",
+ "DeepseekV2ForCausalLM",
+ "DeepseekV2ForSequenceClassification"
+ ]
+ }
+ },
+ "info.moe.deepseek-v3": {
+ "*": {
+ "repo": "bzantium/tiny-deepseek-v3",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekV3Model"
+ }
+ },
+ "tasks": [
+ "DeepseekV3PreTrainedModel",
+ "DeepseekV3Model",
+ "DeepseekV3ForCausalLM",
+ "DeepseekV3ForSequenceClassification",
+ "DeepseekV3ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.deepseek-vl-chat": {
+ "*": {
+ "repo": "deepseek-community/deepseek-vl-1.3b-chat",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekVLModel"
+ }
+ },
+ "tasks": [
+ "DeepseekVLPreTrainedModel",
+ "DeepseekVLModel",
+ "DeepseekVLForConditionalGeneration"
+ ]
+ }
+ },
+ "info.detr.deformable-detr": {
+ "*": {
+ "repo": "SenseTime/deformable-detr",
+ "pkg": {
+ "0": {
+ "transformers": "DeformableDetrModel"
+ }
+ },
+ "tasks": [
+ "DeformableDetrForObjectDetection",
+ "DeformableDetrModel",
+ "DeformableDetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.deit-distilled-patch16-224": {
+ "*": {
+ "repo": "facebook/deit-base-distilled-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "DeiTModel"
+ }
+ },
+ "tasks": [
+ "DeiTForImageClassification",
+ "DeiTForImageClassificationWithTeacher",
+ "DeiTForMaskedImageModeling",
+ "DeiTModel",
+ "DeiTPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.depth": {
+ "*": {
+ "repo": "apple/DepthPro",
+ "pkg": {
+ "0": {
+ "transformers": "DepthProModel"
+ }
+ },
+ "tasks": [
+ "DepthProPreTrainedModel",
+ "DepthProModel",
+ "DepthProForDepthEstimation"
+ ]
+ }
+ },
+ "info.detr.detr-resnet-50": {
+ "*": {
+ "repo": "facebook/detr-resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "DetrModel"
+ }
+ },
+ "tasks": [
+ "DetrForObjectDetection",
+ "DetrForSegmentation",
+ "DetrModel",
+ "DetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.dia": {
+ "*": {
+ "repo": "nari-labs/Dia-1.6B",
+ "pkg": {
+ "0": {
+ "transformers": "DiaModel"
+ }
+ },
+ "tasks": [
+ "DiaModel",
+ "DiaPreTrainedModel",
+ "DiaForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.diffllama-handcut": {
+ "*": {
+ "repo": "kajuma/DiffLlama-0.3B-handcut",
+ "pkg": {
+ "0": {
+ "transformers": "DiffLlamaModel"
+ }
+ },
+ "tasks": [
+ "DiffLlamaPreTrainedModel",
+ "DiffLlamaModel",
+ "DiffLlamaForCausalLM",
+ "DiffLlamaForSequenceClassification",
+ "DiffLlamaForQuestionAnswering",
+ "DiffLlamaForTokenClassification"
+ ]
+ }
+ },
+ "info.gan.dinat-in-224": {
+ "*": {
+ "repo": "shi-labs/dinat-mini-in1k-224",
+ "pkg": {
+ "0": {
+ "transformers": "DinatModel"
+ }
+ },
+ "tasks": [
+ "DinatForImageClassification",
+ "DinatModel",
+ "DinatPreTrainedModel",
+ "DinatBackbone"
+ ]
+ }
+ },
+ "info.vit.dinov2-patch16-224": {
+ "*": {
+ "repo": "google/dinov2-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Dinov2Model"
+ }
+ },
+ "tasks": [
+ "Dinov2ForImageClassification",
+ "Dinov2Model",
+ "Dinov2PreTrainedModel",
+ "Dinov2Backbone"
+ ]
+ }
+ },
+ "info.vit.dinov2-with-registers": {
+ "*": {
+ "repo": "facebook/dinov2-with-registers-base",
+ "pkg": {
+ "0": {
+ "transformers": "Dinov2WithRegistersModel"
+ }
+ },
+ "tasks": [
+ "Dinov2WithRegistersPreTrainedModel",
+ "Dinov2WithRegistersModel",
+ "Dinov2WithRegistersForImageClassification",
+ "Dinov2WithRegistersBackbone"
+ ]
+ }
+ },
+ "info.vit.dinov3-convnext-pretrain-lvd": {
+ "*": {
+ "repo": "facebook/dinov3-convnext-tiny-pretrain-lvd1689m",
+ "pkg": {
+ "0": {
+ "transformers": "DINOv3ConvNextModel"
+ }
+ },
+ "tasks": [
+ "DINOv3ConvNextModel",
+ "DINOv3ConvNextPreTrainedModel",
+ "DINOv3ConvNextBackbone"
+ ]
+ }
+ },
+ "info.vit.dinov3-vits16-pretrain-lvd": {
+ "*": {
+ "repo": "facebook/dinov3-vits16-pretrain-lvd1689m",
+ "pkg": {
+ "0": {
+ "transformers": "DINOv3ViTModel"
+ }
+ },
+ "tasks": [
+ "DINOv3ViTModel",
+ "DINOv3ViTPreTrainedModel",
+ "DINOv3ViTBackbone"
+ ]
+ }
+ },
+ "info.art.distilbert-uncased": {
+ "*": {
+ "repo": "distilbert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "DistilBertModel"
+ }
+ },
+ "tasks": [
+ "DistilBertForMaskedLM",
+ "DistilBertForMultipleChoice",
+ "DistilBertForQuestionAnswering",
+ "DistilBertForSequenceClassification",
+ "DistilBertForTokenClassification",
+ "DistilBertModel",
+ "DistilBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.doge": {
+ "*": {
+ "repo": "SmallDoge/Doge-320M",
+ "pkg": {
+ "0": {
+ "transformers": "DogeModel"
+ }
+ },
+ "tasks": [
+ "DogeForCausalLM",
+ "DogeModel",
+ "DogePreTrainedModel",
+ "DogeForSequenceClassification"
+ ]
+ }
+ },
+ "info.vit.donut": {
+ "*": {
+ "repo": "naver-clova-ix/donut-base",
+ "pkg": {
+ "0": {
+ "transformers": "DonutSwinModel"
+ }
+ },
+ "tasks": [
+ "DonutSwinModel",
+ "DonutSwinPreTrainedModel",
+ "DonutSwinForImageClassification"
+ ]
+ }
+ },
+ "info.moe.dots-llm1": {
+ "*": {
+ "repo": "rednote-hilab/dots.llm1.base",
+ "pkg": {
+ "0": {
+ "transformers": "Dots1Model"
+ }
+ },
+ "tasks": [
+ "Dots1PreTrainedModel",
+ "Dots1Model",
+ "Dots1ForCausalLM"
+ ]
+ }
+ },
+ "info.vit.dpr-ctx-encoder-single-nq": {
+ "*": {
+ "repo": "facebook/dpr-ctx_encoder-single-nq-base",
+ "pkg": {
+ "0": {
+ "transformers": "DPRQuestionEncoder"
+ }
+ },
+ "tasks": [
+ "DPRContextEncoder",
+ "DPRPretrainedContextEncoder",
+ "DPRPreTrainedModel",
+ "DPRPretrainedQuestionEncoder",
+ "DPRPretrainedReader",
+ "DPRQuestionEncoder",
+ "DPRReader"
+ ]
+ }
+ },
+ "info.detr.dpt": {
+ "*": {
+ "repo": "Intel/dpt-large",
+ "pkg": {
+ "0": {
+ "transformers": "DPTModel"
+ }
+ },
+ "tasks": [
+ "DPTForDepthEstimation",
+ "DPTForSemanticSegmentation",
+ "DPTModel",
+ "DPTPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.edgetam1-hiera": {
+ "*": {
+ "repo": "facebook/edgetam.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "EdgeTamModel"
+ }
+ },
+ "tasks": [
+ "EdgeTamModel",
+ "EdgeTamVisionModel",
+ "EdgeTamPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.edgetam": {
+ "*": {
+ "repo": "facebook/EdgeTAM",
+ "pkg": {
+ "0": {
+ "transformers": "EdgeTamVideoModel"
+ }
+ },
+ "tasks": [
+ "EdgeTamVideoModel",
+ "EdgeTamVideoInferenceSession",
+ "EdgeTamVideoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.efficientloftr": {
+ "*": {
+ "repo": "zju-community/efficientloftr",
+ "pkg": {
+ "0": {
+ "transformers": "EfficientLoFTRModel"
+ }
+ },
+ "tasks": [
+ "EfficientLoFTRPreTrainedModel",
+ "EfficientLoFTRModel",
+ "EfficientLoFTRForKeypointMatching"
+ ]
+ }
+ },
+ "info.vit.efficientnet-b7": {
+ "*": {
+ "repo": "google/efficientnet-b7",
+ "pkg": {
+ "0": {
+ "transformers": "EfficientNetModel"
+ }
+ },
+ "tasks": [
+ "EfficientNetForImageClassification",
+ "EfficientNetModel",
+ "EfficientNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.electra-discriminator": {
+ "*": {
+ "repo": "google/electra-small-discriminator",
+ "pkg": {
+ "0": {
+ "transformers": "ElectraModel"
+ }
+ },
+ "tasks": [
+ "ElectraForCausalLM",
+ "ElectraForMaskedLM",
+ "ElectraForMultipleChoice",
+ "ElectraForPreTraining",
+ "ElectraForQuestionAnswering",
+ "ElectraForSequenceClassification",
+ "ElectraForTokenClassification",
+ "ElectraModel",
+ "ElectraPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.emu3-chat-hf": {
+ "*": {
+ "repo": "Emu3-community/Emu3-Chat-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Emu3Model"
+ }
+ },
+ "tasks": [
+ "Emu3ForConditionalGeneration",
+ "Emu3ForCausalLM",
+ "Emu3TextModel",
+ "Emu3PreTrainedModel",
+ "Emu3VQVAE",
+ "Emu3Model"
+ ]
+ }
+ },
+ "info.gan.encodec": {
+ "*": {
+ "repo": "facebook/encodec_24khz",
+ "pkg": {
+ "0": {
+ "transformers": "EncodecModel"
+ }
+ },
+ "tasks": [
+ "EncodecModel",
+ "EncodecPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.ernie-3-zh": {
+ "*": {
+ "repo": "nghuyong/ernie-3.0-base-zh",
+ "pkg": {
+ "0": {
+ "transformers": "ErnieModel"
+ }
+ },
+ "tasks": [
+ "ErnieForCausalLM",
+ "ErnieForMaskedLM",
+ "ErnieForMultipleChoice",
+ "ErnieForNextSentencePrediction",
+ "ErnieForPreTraining",
+ "ErnieForQuestionAnswering",
+ "ErnieForSequenceClassification",
+ "ErnieForTokenClassification",
+ "ErnieModel",
+ "ErniePreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.ernie-45-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-0.3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5Model"
+ }
+ },
+ "tasks": [
+ "Ernie4_5ForCausalLM",
+ "Ernie4_5Model",
+ "Ernie4_5PreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.ernie-4-a-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-21B-A3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5_MoeModel"
+ }
+ },
+ "tasks": [
+ "Ernie4_5_MoeForCausalLM",
+ "Ernie4_5_MoeModel",
+ "Ernie4_5_MoePreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.ernie-4-vl-a-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-VL-28B-A3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5_VL_MoeModel"
+ }
+ },
+ "tasks": [
+ "Ernie4_5_VL_MoePreTrainedModel",
+ "Ernie4_5_VL_MoeForConditionalGeneration",
+ "Ernie4_5_VL_MoeModel",
+ "Ernie4_5_VL_MoeTextModel",
+ "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
+ "Ernie4_5_VL_MoeVariableResolutionResamplerModel"
+ ]
+ }
+ },
+ "info.aet.esm": {
+ "*": {
+ "repo": "facebook/esm-1b",
+ "pkg": {
+ "0": {
+ "transformers": "EsmModel"
+ }
+ },
+ "tasks": [
+ "EsmForMaskedLM",
+ "EsmForSequenceClassification",
+ "EsmForTokenClassification",
+ "EsmModel",
+ "EsmPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.evolla-hf": {
+ "*": {
+ "repo": "westlake-repl/Evolla-10B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "EvollaModel"
+ }
+ },
+ "tasks": [
+ "EvollaForProteinText2Text",
+ "EvollaModel",
+ "EvollaPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.exaone-4": {
+ "*": {
+ "repo": "LGAI-EXAONE/EXAONE-4.0-32B",
+ "pkg": {
+ "0": {
+ "transformers": "Exaone4Model"
+ }
+ },
+ "tasks": [
+ "Exaone4PreTrainedModel",
+ "Exaone4Model",
+ "Exaone4ForCausalLM",
+ "Exaone4ForSequenceClassification",
+ "Exaone4ForTokenClassification",
+ "Exaone4ForQuestionAnswering"
+ ]
+ }
+ },
+ "info.ssm.falcon": {
+ "*": {
+ "repo": "tiiuae/falcon-7b",
+ "pkg": {
+ "0": {
+ "transformers": "FalconModel"
+ }
+ },
+ "tasks": [
+ "FalconForCausalLM",
+ "FalconModel",
+ "FalconPreTrainedModel",
+ "FalconForSequenceClassification",
+ "FalconForTokenClassification",
+ "FalconForQuestionAnswering"
+ ]
+ }
+ },
+ "info.ssm.falconh1-t-hf": {
+ "*": {
+ "repo": "tiiuae/Falcon-H1-34B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "FalconH1Model"
+ }
+ },
+ "tasks": [
+ "FalconH1Model",
+ "FalconH1ForCausalLM",
+ "FalconH1PreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.falcon-mamba": {
+ "*": {
+ "repo": "tiiuae/falcon-mamba-7b",
+ "pkg": {
+ "0": {
+ "transformers": "FalconMambaModel"
+ }
+ },
+ "tasks": [
+ "FalconMambaForCausalLM",
+ "FalconMambaModel",
+ "FalconMambaPreTrainedModel",
+ "FalconMambaCache"
+ ]
+ }
+ },
+ "info.vit.fastvlm": {
+ "*": {
+ "repo": "KamilaMila/FastVLM-7B",
+ "pkg": {
+ "0": {
+ "transformers": "FastVlmModel"
+ }
+ },
+ "tasks": [
+ "FastVlmForConditionalGeneration",
+ "FastVlmModel",
+ "FastVlmPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.fastspeech2-conformer": {
+ "*": {
+ "repo": "espnet/fastspeech2_conformer",
+ "pkg": {
+ "0": {
+ "transformers": "FastSpeech2ConformerModel"
+ }
+ },
+ "tasks": [
+ "FastSpeech2ConformerWithHifiGan",
+ "FastSpeech2ConformerHifiGan",
+ "FastSpeech2ConformerModel",
+ "FastSpeech2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.fastspeech2-conformer": {
+ "*": {
+ "repo": "espnet/fastspeech2_conformer",
+ "pkg": {
+ "0": {
+ "transformers": "FastSpeech2ConformerWithHifiGan"
+ }
+ },
+ "tasks": [
+ "FastSpeech2ConformerWithHifiGan",
+ "FastSpeech2ConformerHifiGan",
+ "FastSpeech2ConformerModel",
+ "FastSpeech2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.flaubert-uncased": {
+ "*": {
+ "repo": "flaubert/flaubert_base_uncased",
+ "pkg": {
+ "0": {
+ "transformers": "FlaubertModel"
+ }
+ },
+ "tasks": [
+ "FlaubertForMultipleChoice",
+ "FlaubertForQuestionAnswering",
+ "FlaubertForQuestionAnsweringSimple",
+ "FlaubertForSequenceClassification",
+ "FlaubertForTokenClassification",
+ "FlaubertModel",
+ "FlaubertWithLMHeadModel",
+ "FlaubertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.flava": {
+ "*": {
+ "repo": "facebook/flava-full",
+ "pkg": {
+ "0": {
+ "transformers": "FlavaModel"
+ }
+ },
+ "tasks": [
+ "FlavaForPreTraining",
+ "FlavaImageCodebook",
+ "FlavaImageModel",
+ "FlavaModel",
+ "FlavaMultimodalModel",
+ "FlavaPreTrainedModel",
+ "FlavaTextModel"
+ ]
+ }
+ },
+ "info.moe.flexolmo-7x-1t": {
+ "*": {
+ "repo": "allenai/FlexOlmo-7x7B-1T",
+ "pkg": {
+ "0": {
+ "transformers": "FlexOlmoModel"
+ }
+ },
+ "tasks": [
+ "FlexOlmoForCausalLM",
+ "FlexOlmoModel",
+ "FlexOlmoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.florence-2": {
+ "*": {
+ "repo": "florence-community/Florence-2-base",
+ "pkg": {
+ "0": {
+ "transformers": "Florence2Model"
+ }
+ },
+ "tasks": [
+ "Florence2Model",
+ "Florence2ForConditionalGeneration",
+ "Florence2PreTrainedModel",
+ "Florence2VisionBackbone",
+ "Florence2VisionPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.fnet": {
+ "*": {
+ "repo": "google/fnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "FNetModel"
+ }
+ },
+ "tasks": [
+ "FNetForMaskedLM",
+ "FNetForMultipleChoice",
+ "FNetForNextSentencePrediction",
+ "FNetForPreTraining",
+ "FNetForQuestionAnswering",
+ "FNetForSequenceClassification",
+ "FNetForTokenClassification",
+ "FNetLayer",
+ "FNetModel",
+ "FNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.focalnet": {
+ "*": {
+ "repo": "microsoft/focalnet-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "FocalNetModel"
+ }
+ },
+ "tasks": [
+ "FocalNetForImageClassification",
+ "FocalNetForMaskedImageModeling",
+ "FocalNetBackbone",
+ "FocalNetModel",
+ "FocalNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.wmt19-en-ru": {
+ "*": {
+ "repo": "facebook/wmt19-en-ru",
+ "pkg": {
+ "0": {
+ "transformers": "FSMTModel"
+ }
+ },
+ "tasks": [
+ "FSMTForConditionalGeneration",
+ "FSMTModel",
+ "PretrainedFSMTModel"
+ ]
+ }
+ },
+ "info.aet.funnel": {
+ "*": {
+ "repo": "funnel-transformer/small",
+ "pkg": {
+ "0": {
+ "transformers": "FunnelModel"
+ }
+ },
+ "tasks": [
+ "FunnelBaseModel",
+ "FunnelForMaskedLM",
+ "FunnelForMultipleChoice",
+ "FunnelForPreTraining",
+ "FunnelForQuestionAnswering",
+ "FunnelForSequenceClassification",
+ "FunnelForTokenClassification",
+ "FunnelModel",
+ "FunnelPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.fuyu": {
+ "*": {
+ "repo": "adept/fuyu-8b",
+ "pkg": {
+ "0": {
+ "transformers": "FuyuModel"
+ }
+ },
+ "tasks": [
+ "FuyuForCausalLM",
+ "FuyuPreTrainedModel",
+ "FuyuModel"
+ ]
+ }
+ },
+ "info.stst.gemma": {
+ "*": {
+ "repo": "google/gemma-7b",
+ "pkg": {
+ "0": {
+ "transformers": "GemmaModel"
+ }
+ },
+ "file_256": [
+ "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
+ ],
+ "layer_b3": [
+ "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
+ "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
+ ],
+ "layer_256": [
+ "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
+ "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
+ ],
+ "tasks": [
+ "GemmaModel",
+ "GemmaForCausalLM",
+ "GemmaForSequenceClassification",
+ "GemmaForTokenClassification",
+ "GemmaPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.gemma2": {
+ "*": {
+ "repo": "google/gemma-2-9b",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma2Model"
+ }
+ },
+ "file_256": [
+ "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
+ "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
+ ],
+ "layer_b3": [
+ "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
+ "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
+ ],
+ "layer_256": [
+ "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
+ "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
+ ],
+ "tasks": [
+ "Gemma2ForCausalLM",
+ "Gemma2Model",
+ "Gemma2PreTrainedModel",
+ "Gemma2ForSequenceClassification",
+ "Gemma2ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.gemma-3": {
+ "*": {
+ "repo": "google/gemma-3-4b-it",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma3Model"
+ }
+ },
+ "tasks": [
+ "Gemma3PreTrainedModel",
+ "Gemma3TextModel",
+ "Gemma3ForCausalLM",
+ "Gemma3ForConditionalGeneration",
+ "Gemma3Model",
+ "Gemma3ForSequenceClassification",
+ "Gemma3TextForSequenceClassification"
+ ]
+ }
+ },
+ "info.stst.gemma3-text": {
+ "*": {
+ "repo": "google/gemma-3-12b-it",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma3TextModel"
+ }
+ },
+ "tasks": [
+ "Gemma3PreTrainedModel",
+ "Gemma3TextModel",
+ "Gemma3ForCausalLM",
+ "Gemma3ForConditionalGeneration",
+ "Gemma3Model",
+ "Gemma3ForSequenceClassification",
+ "Gemma3TextForSequenceClassification"
+ ]
+ }
+ },
+ "info.vit.gemma-3n-e": {
+ "*": {
+ "repo": "google/gemma-3n-E4B",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma3nModel"
+ }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
+ }
+ },
+ "info.art.gemma-3n-e": {
+ "*": {
+ "repo": "google/gemma-3n-E4B",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma3nAudioEncoder"
+ }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
+ }
+ },
+ "info.stst.gemma-3n-e": {
+ "*": {
+ "repo": "google/gemma-3n-E4B",
+ "pkg": {
+ "0": {
+ "transformers": "Gemma3nTextModel"
+ }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
+ }
+ },
+ "info.vit.git": {
+ "*": {
+ "repo": "microsoft/git-base",
+ "pkg": {
+ "0": {
+ "transformers": "GitModel"
+ }
+ },
+ "tasks": [
+ "GitForCausalLM",
+ "GitModel",
+ "GitPreTrainedModel",
+ "GitVisionModel"
+ ]
+ }
+ },
+ "info.stst.glm-4-chat": {
+ "*": {
+ "repo": "zai-org/glm-4-9b-chat",
+ "pkg": {
+ "0": {
+ "transformers": "GlmModel"
+ }
+ },
+ "tasks": [
+ "GlmPreTrainedModel",
+ "GlmModel",
+ "GlmForCausalLM",
+ "GlmForSequenceClassification",
+ "GlmForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.glm-4-0414": {
+ "*": {
+ "repo": "zai-org/GLM-4-9B-0414",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4Model"
+ }
+ },
+ "tasks": [
+ "Glm4PreTrainedModel",
+ "Glm4Model",
+ "Glm4ForCausalLM",
+ "Glm4ForSequenceClassification",
+ "Glm4ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.glm-4v-thinking": {
+ "*": {
+ "repo": "zai-org/GLM-4.1V-9B-Thinking",
+ "pkg": {
+ "0": {
+ "transformers": "Glm46VModel"
+ }
+ },
+ "tasks": [
+ "Glm46VModel",
+ "Glm46VPreTrainedModel",
+ "Glm46VForConditionalGeneration"
+ ]
+ }
+ },
+ "info.moe.glm-4-a": {
+ "*": {
+ "repo": "zai-org/GLM-4.5-Air",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4MoeModel"
+ }
+ },
+ "tasks": [
+ "Glm4MoePreTrainedModel",
+ "Glm4MoeModel",
+ "Glm4MoeForCausalLM"
+ ]
+ }
+ },
+ "info.vit.glm-4v": {
+ "*": {
+ "repo": "zai-org/GLM-4.5V",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4vMoeModel"
+ }
+ },
+ "tasks": [
+ "Glm4vMoeForConditionalGeneration",
+ "Glm4vMoeModel",
+ "Glm4vMoePreTrainedModel",
+ "Glm4vMoeTextModel",
+ "Glm4vMoeVisionModel"
+ ]
+ }
+ },
+ "info.moe.glm-4v": {
+ "*": {
+ "repo": "zai-org/GLM-4.5V",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4vMoeTextModel"
+ }
+ },
+ "tasks": [
+ "Glm4vMoeForConditionalGeneration",
+ "Glm4vMoeModel",
+ "Glm4vMoePreTrainedModel",
+ "Glm4vMoeTextModel",
+ "Glm4vMoeVisionModel"
+ ]
+ }
+ },
+ "info.stst.glm-4v-thinking": {
+ "*": {
+ "repo": "zai-org/GLM-4.1V-9B-Thinking",
+ "pkg": {
+ "0": {
+ "transformers": "Glm4vTextModel"
+ }
+ },
+ "tasks": [
+ "Glm4vForConditionalGeneration",
+ "Glm4vModel",
+ "Glm4vPreTrainedModel",
+ "Glm4vTextModel",
+ "Glm4vVisionModel"
+ ]
+ }
+ },
+ "info.stst.glm-asr-nano-2512": {
+ "*": {
+ "repo": "zai-org/GLM-ASR-Nano-2512",
+ "pkg": {
+ "0": {
+ "transformers": "GlmAsrForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "GlmAsrEncoder",
+ "GlmAsrForConditionalGeneration",
+ "GlmAsrPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.glpn-kitti": {
+ "*": {
+ "repo": "vinvino02/glpn-kitti",
+ "pkg": {
+ "0": {
+ "transformers": "GLPNModel"
+ }
+ },
+ "tasks": [
+ "GLPNForDepthEstimation",
+ "GLPNLayer",
+ "GLPNModel",
+ "GLPNPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.got-ocr-2-hf": {
+ "*": {
+ "repo": "stepfun-ai/GOT-OCR-2.0-hf",
+ "pkg": {
+ "0": {
+ "transformers": "GotOcr2Model"
+ }
+ },
+ "tasks": [
+ "GotOcr2PreTrainedModel",
+ "GotOcr2Model",
+ "GotOcr2ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.art.gpt2": {
+ "*": {
+ "repo": "openai-community/gpt2",
+ "pkg": {
+ "0": {
+ "transformers": "GPT2Model"
+ }
+ },
+ "tasks": [
+ "GPT2DoubleHeadsModel",
+ "GPT2ForQuestionAnswering",
+ "GPT2ForSequenceClassification",
+ "GPT2ForTokenClassification",
+ "GPT2LMHeadModel",
+ "GPT2Model",
+ "GPT2PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.gpt-bigcode-santacoder": {
+ "*": {
+ "repo": "bigcode/gpt_bigcode-santacoder",
+ "pkg": {
+ "0": {
+ "transformers": "GPTBigCodeModel"
+ }
+ },
+ "tasks": [
+ "GPTBigCodeForSequenceClassification",
+ "GPTBigCodeForTokenClassification",
+ "GPTBigCodeForCausalLM",
+ "GPTBigCodeModel",
+ "GPTBigCodePreTrainedModel"
+ ]
+ }
+ },
+ "info.art.gpt-neo": {
+ "*": {
+ "repo": "EleutherAI/gpt-neo-1.3B",
+ "pkg": {
+ "0": {
+ "transformers": "GPTNeoModel"
+ }
+ },
+ "tasks": [
+ "GPTNeoForCausalLM",
+ "GPTNeoForQuestionAnswering",
+ "GPTNeoForSequenceClassification",
+ "GPTNeoForTokenClassification",
+ "GPTNeoModel",
+ "GPTNeoPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.gpt-neox": {
+ "*": {
+ "repo": "EleutherAI/gpt-neox-20b",
+ "pkg": {
+ "0": {
+ "transformers": "GPTNeoXModel"
+ }
+ },
+ "tasks": [
+ "GPTNeoXForCausalLM",
+ "GPTNeoXForQuestionAnswering",
+ "GPTNeoXForSequenceClassification",
+ "GPTNeoXForTokenClassification",
+ "GPTNeoXLayer",
+ "GPTNeoXModel",
+ "GPTNeoXPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.gpt-neox-japanese": {
+ "*": {
+ "repo": "abeja/gpt-neox-japanese-2.7b",
+ "pkg": {
+ "0": {
+ "transformers": "GPTNeoXJapaneseModel"
+ }
+ },
+ "tasks": [
+ "GPTNeoXJapaneseForCausalLM",
+ "GPTNeoXJapaneseLayer",
+ "GPTNeoXJapaneseModel",
+ "GPTNeoXJapanesePreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.gpt-oss": {
+ "*": {
+ "repo": "openai/gpt-oss-120b",
+ "pkg": {
+ "0": {
+ "transformers": "GptOssModel"
+ }
+ },
+ "file_256": [
+ "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
+ "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
+ ],
+ "layer_b3": [
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
+ "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
+ ],
+ "layer_256": [
+ "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
+ "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
+ ],
+ "tasks": [
+ "GptOssForCausalLM",
+ "GptOssForSequenceClassification",
+ "GptOssForTokenClassification",
+ "GptOssModel",
+ "GptOssPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.gpt-j": {
+ "*": {
+ "repo": "EleutherAI/gpt-j-6B",
+ "pkg": {
+ "0": {
+ "transformers": "GPTJModel"
+ }
+ },
+ "tasks": [
+ "GPTJForCausalLM",
+ "GPTJForQuestionAnswering",
+ "GPTJForSequenceClassification",
+ "GPTJModel",
+ "GPTJPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.granite": {
+ "*": {
+ "repo": "ibm-granite/granite-3.3-2b-base",
+ "pkg": {
+ "0": {
+ "transformers": "GraniteModel"
+ }
+ },
+ "tasks": [
+ "GraniteForCausalLM",
+ "GraniteModel",
+ "GranitePreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.powermoe": {
+ "*": {
+ "repo": "ibm-research/PowerMoE-3b",
+ "pkg": {
+ "0": {
+ "transformers": "GraniteMoeModel"
+ }
+ },
+ "tasks": [
+ "GraniteMoeForCausalLM",
+ "GraniteMoeModel",
+ "GraniteMoePreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.granite-4-h": {
+ "*": {
+ "repo": "ibm-granite/granite-4.0-h-small",
+ "pkg": {
+ "0": {
+ "transformers": "GraniteMoeHybridModel"
+ }
+ },
+ "tasks": [
+ "GraniteMoeHybridForCausalLM",
+ "GraniteMoeHybridModel",
+ "GraniteMoeHybridPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.moe-active-shared-experts": {
+ "*": {
+ "repo": "ibm-research/moe-7b-1b-active-shared-experts",
+ "pkg": {
+ "0": {
+ "transformers": "GraniteMoeSharedModel"
+ }
+ },
+ "tasks": [
+ "GraniteMoeSharedForCausalLM",
+ "GraniteMoeSharedModel",
+ "GraniteMoeSharedPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.llava-v1-mistral-hf": {
+ "*": {
+ "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaNextModel"
+ }
+ },
+ "tasks": [
+ "LlavaNextForConditionalGeneration",
+ "LlavaNextPreTrainedModel",
+ "LlavaNextModel"
+ ]
+ }
+ },
+ "info.detr.grounding-dino": {
+ "*": {
+ "repo": "IDEA-Research/grounding-dino-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "GroundingDinoModel"
+ }
+ },
+ "tasks": [
+ "GroundingDinoForObjectDetection",
+ "GroundingDinoModel",
+ "GroundingDinoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.groupvit-gcc-yfcc": {
+ "*": {
+ "repo": "nvidia/groupvit-gcc-yfcc",
+ "pkg": {
+ "0": {
+ "transformers": "GroupViTModel"
+ }
+ },
+ "tasks": [
+ "GroupViTModel",
+ "GroupViTPreTrainedModel",
+ "GroupViTTextModel",
+ "GroupViTVisionModel"
+ ]
+ }
+ },
+ "info.stst.helium": {
+ "*": {
+ "repo": "kyutai/helium-1-2b",
+ "pkg": {
+ "0": {
+ "transformers": "HeliumModel"
+ }
+ },
+ "tasks": [
+ "HeliumPreTrainedModel",
+ "HeliumModel",
+ "HeliumForCausalLM",
+ "HeliumForSequenceClassification",
+ "HeliumForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.dfine-x-coco": {
+ "*": {
+ "repo": "ustc-community/dfine_x_coco",
+ "pkg": {
+ "0": {
+ "transformers": "HGNetV2Backbone"
+ }
+ },
+ "tasks": [
+ "HGNetV2Backbone",
+ "HGNetV2PreTrainedModel",
+ "HGNetV2ForImageClassification"
+ ]
+ }
+ },
+ "info.vit.hiera-224": {
+ "*": {
+ "repo": "facebook/hiera-base-224-hf",
+ "pkg": {
+ "0": {
+ "transformers": "HieraModel"
+ }
+ },
+ "tasks": [
+ "HieraForImageClassification",
+ "HieraForPreTraining",
+ "HieraBackbone",
+ "HieraModel",
+ "HieraPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.hubert-ls960": {
+ "*": {
+ "repo": "facebook/hubert-base-ls960",
+ "pkg": {
+ "0": {
+ "transformers": "HubertModel"
+ }
+ },
+ "tasks": [
+ "HubertForCTC",
+ "HubertForSequenceClassification",
+ "HubertModel",
+ "HubertPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.hunyuan": {
+ "*": {
+ "repo": "tencent/Hunyuan-7B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "HunYuanDenseV1Model"
+ }
+ },
+ "tasks": [
+ "HunYuanDenseV1ForCausalLM",
+ "HunYuanDenseV1Model",
+ "HunYuanDenseV1PreTrainedModel",
+ "HunYuanDenseV1ForSequenceClassification"
+ ]
+ }
+ },
+ "info.moe.hunyuan-a": {
+ "*": {
+ "repo": "tencent/Hunyuan-A13B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "HunYuanMoEV1Model"
+ }
+ },
+ "tasks": [
+ "HunYuanMoEV1ForCausalLM",
+ "HunYuanMoEV1Model",
+ "HunYuanMoEV1PreTrainedModel",
+ "HunYuanMoEV1ForSequenceClassification"
+ ]
+ }
+ },
+ "info.art.ibert-roberta": {
+ "*": {
+ "repo": "kssteven/ibert-roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "IBertModel"
+ }
+ },
+ "tasks": [
+ "IBertForMaskedLM",
+ "IBertForMultipleChoice",
+ "IBertForQuestionAnswering",
+ "IBertForSequenceClassification",
+ "IBertForTokenClassification",
+ "IBertModel",
+ "IBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.idefics": {
+ "*": {
+ "repo": "HuggingFaceM4/idefics-9b",
+ "pkg": {
+ "0": {
+ "transformers": "IdeficsModel"
+ }
+ },
+ "tasks": [
+ "IdeficsForVisionText2Text",
+ "IdeficsModel",
+ "IdeficsPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.idefics2": {
+ "*": {
+ "repo": "HuggingFaceM4/idefics2-8b",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics2Model"
+ }
+ },
+ "tasks": [
+ "Idefics2ForConditionalGeneration",
+ "Idefics2PreTrainedModel",
+ "Idefics2Model"
+ ]
+ }
+ },
+ "info.vit.idefics3-llama3": {
+ "*": {
+ "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics3Model"
+ }
+ },
+ "tasks": [
+ "Idefics3ForConditionalGeneration",
+ "Idefics3PreTrainedModel",
+ "Idefics3Model",
+ "Idefics3VisionTransformer"
+ ]
+ }
+ },
+ "info.vit.siglip-patch16-224": {
+ "*": {
+ "repo": "google/siglip-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Idefics3VisionTransformer"
+ }
+ },
+ "tasks": [
+ "Idefics3ForConditionalGeneration",
+ "Idefics3PreTrainedModel",
+ "Idefics3Model",
+ "Idefics3VisionTransformer"
+ ]
+ }
+ },
+ "info.vit.ijepa-vith14": {
+ "*": {
+ "repo": "facebook/ijepa_vith14_1k",
+ "pkg": {
+ "0": {
+ "transformers": "IJepaModel"
+ }
+ },
+ "tasks": [
+ "IJepaPreTrainedModel",
+ "IJepaModel",
+ "IJepaForImageClassification"
+ ]
+ }
+ },
+ "info.art.imagegpt": {
+ "*": {
+ "repo": "openai/imagegpt-small",
+ "pkg": {
+ "0": {
+ "transformers": "ImageGPTModel"
+ }
+ },
+ "tasks": [
+ "ImageGPTForCausalImageModeling",
+ "ImageGPTForImageClassification",
+ "ImageGPTModel",
+ "ImageGPTPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.informer-tourism-monthly": {
+ "*": {
+ "repo": "huggingface/informer-tourism-monthly",
+ "pkg": {
+ "0": {
+ "transformers": "InformerModel"
+ }
+ },
+ "tasks": [
+ "InformerForPrediction",
+ "InformerModel",
+ "InformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.blip-flan-t5": {
+ "*": {
+ "repo": "Salesforce/instructblip-flan-t5-xl",
+ "pkg": {
+ "0": {
+ "transformers": "InstructBlipModel"
+ }
+ },
+ "tasks": [
+ "InstructBlipQFormerModel",
+ "InstructBlipPreTrainedModel",
+ "InstructBlipModel",
+ "InstructBlipForConditionalGeneration",
+ "InstructBlipVisionModel"
+ ]
+ }
+ },
+ "info.vit.internvl3-hf": {
+ "*": {
+ "repo": "OpenGVLab/InternVL3-1B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "InternVLModel"
+ }
+ },
+ "tasks": [
+ "InternVLVisionPreTrainedModel",
+ "InternVLVisionModel",
+ "InternVLPreTrainedModel",
+ "InternVLModel",
+ "InternVLForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.jais-2-chat": {
+ "*": {
+ "repo": "inceptionai/Jais-2-8B-Chat",
+ "pkg": {
+ "0": {
+ "transformers": "Jais2Model"
+ }
+ },
+ "tasks": [
+ "Jais2Model",
+ "Jais2ForCausalLM",
+ "Jais2PreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.jamba-v0": {
+ "*": {
+ "repo": "ai21labs/Jamba-v0.1",
+ "pkg": {
+ "0": {
+ "transformers": "JambaModel"
+ }
+ },
+ "tasks": [
+ "JambaForCausalLM",
+ "JambaForSequenceClassification",
+ "JambaModel",
+ "JambaPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.janus": {
+ "*": {
+ "repo": "deepseek-community/Janus-Pro-1B",
+ "pkg": {
+ "0": {
+ "transformers": "JanusModel"
+ }
+ },
+ "tasks": [
+ "JanusPreTrainedModel",
+ "JanusForConditionalGeneration",
+ "JanusModel",
+ "JanusVQVAE",
+ "JanusVisionModel"
+ ]
+ }
+ },
+ "info.moe.jetmoe": {
+ "*": {
+ "repo": "jetmoe/jetmoe-8b",
+ "pkg": {
+ "0": {
+ "transformers": "JetMoeModel"
+ }
+ },
+ "tasks": [
+ "JetMoeForCausalLM",
+ "JetMoeModel",
+ "JetMoePreTrainedModel",
+ "JetMoeForSequenceClassification"
+ ]
+ }
+ },
+ "info.vit.kosmos-2-patch14-224": {
+ "*": {
+ "repo": "microsoft/kosmos-2-patch14-224",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2Model"
+ }
+ },
+ "tasks": [
+ "Kosmos2ForConditionalGeneration",
+ "Kosmos2Model",
+ "Kosmos2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.kosmos-2": {
+ "*": {
+ "repo": "microsoft/kosmos-2.5",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2_5Model"
+ }
+ },
+ "tasks": [
+ "Kosmos2_5ForConditionalGeneration",
+ "Kosmos2_5Model",
+ "Kosmos2_5PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.stt-en-trfs": {
+ "*": {
+ "repo": "kyutai/stt-2.6b-en-trfs",
+ "pkg": {
+ "0": {
+ "transformers": "KyutaiSpeechToTextModel"
+ }
+ },
+ "tasks": [
+ "KyutaiSpeechToTextPreTrainedModel",
+ "KyutaiSpeechToTextModel",
+ "KyutaiSpeechToTextForConditionalGeneration"
+ ]
+ }
+ },
+ "info.aet.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrForCTC"
+ }
+ },
+ "tasks": [
+ "LasrForCTC",
+ "LasrEncoder",
+ "LasrPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrEncoder"
+ }
+ },
+ "tasks": [
+ "LasrForCTC",
+ "LasrEncoder",
+ "LasrPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.layoutlm-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlm-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMModel"
+ }
+ },
+ "tasks": [
+ "LayoutLMForMaskedLM",
+ "LayoutLMForSequenceClassification",
+ "LayoutLMForTokenClassification",
+ "LayoutLMForQuestionAnswering",
+ "LayoutLMModel",
+ "LayoutLMPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.layoutlmv2-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlmv2-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv2Model"
+ }
+ },
+ "tasks": [
+ "LayoutLMv2ForQuestionAnswering",
+ "LayoutLMv2ForSequenceClassification",
+ "LayoutLMv2ForTokenClassification",
+ "LayoutLMv2Layer",
+ "LayoutLMv2Model",
+ "LayoutLMv2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.layoutlmv3": {
+ "*": {
+ "repo": "microsoft/layoutlmv3-base",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv3Model"
+ }
+ },
+ "tasks": [
+ "LayoutLMv3ForQuestionAnswering",
+ "LayoutLMv3ForSequenceClassification",
+ "LayoutLMv3ForTokenClassification",
+ "LayoutLMv3Model",
+ "LayoutLMv3PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.led-16384": {
+ "*": {
+ "repo": "allenai/led-base-16384",
+ "pkg": {
+ "0": {
+ "transformers": "LEDModel"
+ }
+ },
+ "tasks": [
+ "LEDForConditionalGeneration",
+ "LEDForQuestionAnswering",
+ "LEDForSequenceClassification",
+ "LEDModel",
+ "LEDPreTrainedModel"
+ ]
+ }
+ },
+ "info.gan.levit-128s": {
+ "*": {
+ "repo": "facebook/levit-128S",
+ "pkg": {
+ "0": {
+ "transformers": "LevitModel"
+ }
+ },
+ "tasks": [
+ "LevitForImageClassification",
+ "LevitForImageClassificationWithTeacher",
+ "LevitModel",
+ "LevitPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.lfm": {
+ "*": {
+ "repo": "LiquidAI/LFM2-1.2B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2Model"
+ }
+ },
+ "tasks": [
+ "Lfm2ForCausalLM",
+ "Lfm2Model",
+ "Lfm2PreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.lfm2-a": {
+ "*": {
+ "repo": "LiquidAI/LFM2-8B-A1B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2MoeModel"
+ }
+ },
+ "tasks": [
+ "Lfm2MoeForCausalLM",
+ "Lfm2MoeModel",
+ "Lfm2MoePreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.lfm2-vl": {
+ "*": {
+ "repo": "LiquidAI/LFM2-VL-1.6B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2VlModel"
+ }
+ },
+ "tasks": [
+ "Lfm2VlForConditionalGeneration",
+ "Lfm2VlPreTrainedModel",
+ "Lfm2VlModel"
+ ]
+ }
+ },
+ "info.aet.lightglue-superpoint": {
+ "*": {
+ "repo": "ETH-CVG/lightglue_superpoint",
+ "pkg": {
+ "0": {
+ "transformers": "LightGlueForKeypointMatching"
+ }
+ },
+ "tasks": [
+ "LightGluePreTrainedModel",
+ "LightGlueForKeypointMatching"
+ ]
+ }
+ },
+ "info.art.lilt-roberta-en": {
+ "*": {
+ "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
+ "pkg": {
+ "0": {
+ "transformers": "LiltModel"
+ }
+ },
+ "tasks": [
+ "LiltForQuestionAnswering",
+ "LiltForSequenceClassification",
+ "LiltForTokenClassification",
+ "LiltModel",
+ "LiltPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4ForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "Llama4PreTrainedModel",
+ "Llama4TextModel",
+ "Llama4VisionModel",
+ "Llama4ForCausalLM",
+ "Llama4ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.moe.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4TextModel"
+ }
+ },
+ "tasks": [
+ "Llama4PreTrainedModel",
+ "Llama4TextModel",
+ "Llama4VisionModel",
+ "Llama4ForCausalLM",
+ "Llama4ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.vit.llava": {
+ "*": {
+ "repo": "llava-hf/llava-9b",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaModel"
+ }
+ },
+ "file_256": [
+ "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
+ ],
+ "layer_b3": [
+ "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
+ ],
+ "layer_256": [
+ "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
+ ],
+ "tasks": [
+ "LlavaForConditionalGeneration",
+ "LlavaPreTrainedModel",
+ "LlavaModel"
+ ]
+ }
+ },
+ "info.vit.llava-next-video-hf": {
+ "*": {
+ "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaNextVideoModel"
+ }
+ },
+ "tasks": [
+ "LlavaNextVideoForConditionalGeneration",
+ "LlavaNextVideoModel",
+ "LlavaNextVideoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.llava-onevision-qwen2-ov-hf": {
+ "*": {
+ "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaOnevisionModel"
+ }
+ },
+ "tasks": [
+ "LlavaOnevisionModel",
+ "LlavaOnevisionForConditionalGeneration",
+ "LlavaOnevisionPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.longcat-flash-chat": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Flash-Chat",
+ "pkg": {
+ "0": {
+ "transformers": "LongcatFlashModel"
+ }
+ },
+ "tasks": [
+ "LongcatFlashPreTrainedModel",
+ "LongcatFlashModel",
+ "LongcatFlashForCausalLM"
+ ]
+ }
+ },
+ "info.art.longformer-4096": {
+ "*": {
+ "repo": "allenai/longformer-base-4096",
+ "pkg": {
+ "0": {
+ "transformers": "LongformerModel"
+ }
+ },
+ "tasks": [
+ "LongformerForMaskedLM",
+ "LongformerForMultipleChoice",
+ "LongformerForQuestionAnswering",
+ "LongformerForSequenceClassification",
+ "LongformerForTokenClassification",
+ "LongformerModel",
+ "LongformerPreTrainedModel",
+ "LongformerSelfAttention"
+ ]
+ }
+ },
+ "info.stst.long-t5-local": {
+ "*": {
+ "repo": "google/long-t5-local-base",
+ "pkg": {
+ "0": {
+ "transformers": "LongT5Model"
+ }
+ },
+ "tasks": [
+ "LongT5EncoderModel",
+ "LongT5ForConditionalGeneration",
+ "LongT5Model",
+ "LongT5PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.luke": {
+ "*": {
+ "repo": "studio-ousia/luke-base",
+ "pkg": {
+ "0": {
+ "transformers": "LukeModel"
+ }
+ },
+ "tasks": [
+ "LukeForEntityClassification",
+ "LukeForEntityPairClassification",
+ "LukeForEntitySpanClassification",
+ "LukeForMultipleChoice",
+ "LukeForQuestionAnswering",
+ "LukeForSequenceClassification",
+ "LukeForTokenClassification",
+ "LukeForMaskedLM",
+ "LukeModel",
+ "LukePreTrainedModel"
+ ]
+ }
+ },
+ "info.art.lxmert-uncased": {
+ "*": {
+ "repo": "unc-nlp/lxmert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LxmertModel"
+ }
+ },
+ "tasks": [
+ "LxmertEncoder",
+ "LxmertForPreTraining",
+ "LxmertForQuestionAnswering",
+ "LxmertModel",
+ "LxmertPreTrainedModel",
+ "LxmertVisualFeatureEncoder",
+ "LxmertXLayer"
+ ]
+ }
+ },
+ "info.stst.m": {
+ "*": {
+ "repo": "facebook/m2m100_418M",
+ "pkg": {
+ "0": {
+ "transformers": "M2M100Model"
+ }
+ },
+ "tasks": [
+ "M2M100ForConditionalGeneration",
+ "M2M100Model",
+ "M2M100PreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.mamba": {
+ "*": {
+ "repo": "state-spaces/mamba-2.8b",
+ "pkg": {
+ "0": {
+ "transformers": "MambaModel"
+ }
+ },
+ "tasks": [
+ "MambaForCausalLM",
+ "MambaModel",
+ "MambaPreTrainedModel",
+ "MambaCache"
+ ]
+ }
+ },
+ "info.ssm.mamba2": {
+ "*": {
+ "repo": "AntonV/mamba2-2.7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Mamba2Model"
+ }
+ },
+ "tasks": [
+ "Mamba2ForCausalLM",
+ "Mamba2Model",
+ "Mamba2PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.opus-mt-en-de": {
+ "*": {
+ "repo": "Helsinki-NLP/opus-mt-en-de",
+ "pkg": {
+ "0": {
+ "transformers": "MarianModel"
+ }
+ },
+ "tasks": [
+ "MarianForCausalLM",
+ "MarianModel",
+ "MarianMTModel",
+ "MarianPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.markuplm": {
+ "*": {
+ "repo": "microsoft/markuplm-base",
+ "pkg": {
+ "0": {
+ "transformers": "MarkupLMModel"
+ }
+ },
+ "tasks": [
+ "MarkupLMForQuestionAnswering",
+ "MarkupLMForSequenceClassification",
+ "MarkupLMForTokenClassification",
+ "MarkupLMModel",
+ "MarkupLMPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.mask2former-swin-coco-instance": {
+ "*": {
+ "repo": "facebook/mask2former-swin-small-coco-instance",
+ "pkg": {
+ "0": {
+ "transformers": "Mask2FormerModel"
+ }
+ },
+ "tasks": [
+ "Mask2FormerForUniversalSegmentation",
+ "Mask2FormerModel",
+ "Mask2FormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.maskformer-swin-ade": {
+ "*": {
+ "repo": "facebook/maskformer-swin-base-ade",
+ "pkg": {
+ "0": {
+ "transformers": "MaskFormerModel"
+ }
+ },
+ "tasks": [
+ "MaskFormerForInstanceSegmentation",
+ "MaskFormerModel",
+ "MaskFormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.swin-patch4-window7-224": {
+ "*": {
+ "repo": "microsoft/swin-tiny-patch4-window7-224",
+ "pkg": {
+ "0": {
+ "transformers": "MaskFormerSwinModel"
+ }
+ },
+ "tasks": [
+ "MaskFormerSwinBackbone",
+ "MaskFormerSwinModel",
+ "MaskFormerSwinPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mbart-cc25": {
+ "*": {
+ "repo": "facebook/mbart-large-cc25",
+ "pkg": {
+ "0": {
+ "transformers": "MBartModel"
+ }
+ },
+ "tasks": [
+ "MBartForCausalLM",
+ "MBartForConditionalGeneration",
+ "MBartForQuestionAnswering",
+ "MBartForSequenceClassification",
+ "MBartModel",
+ "MBartPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.megatron-bert-uncased": {
+ "*": {
+ "repo": "nvidia/megatron-bert-uncased-345m",
+ "pkg": {
+ "0": {
+ "transformers": "MegatronBertModel"
+ }
+ },
+ "tasks": [
+ "MegatronBertForCausalLM",
+ "MegatronBertForMaskedLM",
+ "MegatronBertForMultipleChoice",
+ "MegatronBertForNextSentencePrediction",
+ "MegatronBertForPreTraining",
+ "MegatronBertForQuestionAnswering",
+ "MegatronBertForSequenceClassification",
+ "MegatronBertForTokenClassification",
+ "MegatronBertModel",
+ "MegatronBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.metaclip-2-worldwide-huge-quickgelu": {
+ "*": {
+ "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
+ "pkg": {
+ "0": {
+ "transformers": "MetaClip2Model"
+ }
+ },
+ "tasks": [
+ "MetaClip2Model",
+ "MetaClip2PreTrainedModel",
+ "MetaClip2TextModel",
+ "MetaClip2TextModelWithProjection",
+ "MetaClip2VisionModel",
+ "MetaClip2VisionModelWithProjection",
+ "MetaClip2ForImageClassification"
+ ]
+ }
+ },
+ "info.vit.mgp-str": {
+ "*": {
+ "repo": "alibaba-damo/mgp-str-base",
+ "pkg": {
+ "0": {
+ "transformers": "MgpstrForSceneTextRecognition"
+ }
+ },
+ "tasks": [
+ "MgpstrModel",
+ "MgpstrPreTrainedModel",
+ "MgpstrForSceneTextRecognition"
+ ]
+ }
+ },
+ "info.gan.mimi": {
+ "*": {
+ "repo": "kyutai/mimi",
+ "pkg": {
+ "0": {
+ "transformers": "MimiModel"
+ }
+ },
+ "tasks": [
+ "MimiModel",
+ "MimiPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.max-text-01-hf": {
+ "*": {
+ "repo": "MiniMaxAI/MiniMax-Text-01-hf",
+ "pkg": {
+ "0": {
+ "transformers": "MiniMaxModel"
+ }
+ },
+ "tasks": [
+ "MiniMaxPreTrainedModel",
+ "MiniMaxModel",
+ "MiniMaxForCausalLM",
+ "MiniMaxForSequenceClassification",
+ "MiniMaxForTokenClassification",
+ "MiniMaxForQuestionAnswering"
+ ]
+ }
+ },
+ "info.stst.stral-2410": {
+ "*": {
+ "repo": "mistralai/Ministral-8B-Instruct-2410",
+ "pkg": {
+ "0": {
+ "transformers": "MinistralModel"
+ }
+ },
+ "tasks": [
+ "MinistralPreTrainedModel",
+ "MinistralModel",
+ "MinistralForCausalLM",
+ "MinistralForSequenceClassification",
+ "MinistralForTokenClassification",
+ "MinistralForQuestionAnswering"
+ ]
+ }
+ },
+ "info.stst.stral-3-2512": {
+ "*": {
+ "repo": "mistralai/Ministral-3-8B-Base-2512",
+ "pkg": {
+ "0": {
+ "transformers": "Ministral3Model"
+ }
+ },
+ "tasks": [
+ "Ministral3ForCausalLM",
+ "Ministral3ForQuestionAnswering",
+ "Ministral3Model",
+ "Ministral3PreTrainedModel",
+ "Ministral3ForSequenceClassification",
+ "Ministral3ForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.mistral-v0": {
+ "*": {
+ "repo": "mistralai/Mistral-7B-v0.1",
+ "pkg": {
+ "0": {
+ "transformers": "MistralModel"
+ }
+ },
+ "tasks": [
+ "MistralForCausalLM",
+ "MistralForQuestionAnswering",
+ "MistralModel",
+ "MistralPreTrainedModel",
+ "MistralForSequenceClassification",
+ "MistralForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.mistral-3-2503": {
+ "*": {
+ "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+ "pkg": {
+ "0": {
+ "transformers": "Mistral3Model"
+ }
+ },
+ "tasks": [
+ "Mistral3Model",
+ "Mistral3PreTrainedModel",
+ "Mistral3ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.moe.mixtral-8x": {
+ "*": {
+ "repo": "mistralai/Mixtral-8x7B-v0.1",
+ "pkg": {
+ "0": {
+ "transformers": "MixtralModel"
+ }
+ },
+ "tasks": [
+ "MixtralForCausalLM",
+ "MixtralForQuestionAnswering",
+ "MixtralModel",
+ "MixtralPreTrainedModel",
+ "MixtralForSequenceClassification",
+ "MixtralForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.mlcd-vit-bigg-patch14-336": {
+ "*": {
+ "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
+ "pkg": {
+ "0": {
+ "transformers": "MLCDVisionModel"
+ }
+ },
+ "tasks": [
+ "MLCDPreTrainedModel",
+ "MLCDVisionModel"
+ ]
+ }
+ },
+ "info.vit.llama-3-vision": {
+ "*": {
+ "repo": "meta-llama/Llama-3.2-11B-Vision",
+ "pkg": {
+ "0": {
+ "transformers": "MllamaModel"
+ }
+ },
+ "tasks": [
+ "MllamaForConditionalGeneration",
+ "MllamaForCausalLM",
+ "MllamaTextModel",
+ "MllamaVisionModel",
+ "MllamaPreTrainedModel",
+ "MllamaModel"
+ ]
+ }
+ },
+ "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "*": {
+ "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "pkg": {
+ "0": {
+ "transformers": "MMGroundingDinoModel"
+ }
+ },
+ "tasks": [
+ "MMGroundingDinoForObjectDetection",
+ "MMGroundingDinoModel",
+ "MMGroundingDinoPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mobilebert-uncased": {
+ "*": {
+ "repo": "google/mobilebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "MobileBertModel"
+ }
+ },
+ "tasks": [
+ "MobileBertForMaskedLM",
+ "MobileBertForMultipleChoice",
+ "MobileBertForNextSentencePrediction",
+ "MobileBertForPreTraining",
+ "MobileBertForQuestionAnswering",
+ "MobileBertForSequenceClassification",
+ "MobileBertForTokenClassification",
+ "MobileBertLayer",
+ "MobileBertModel",
+ "MobileBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilenet-v1-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v1_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV1Model"
+ }
+ },
+ "tasks": [
+ "MobileNetV1ForImageClassification",
+ "MobileNetV1Model",
+ "MobileNetV1PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilenet-v2-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v2_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV2Model"
+ }
+ },
+ "tasks": [
+ "MobileNetV2ForImageClassification",
+ "MobileNetV2ForSemanticSegmentation",
+ "MobileNetV2Model",
+ "MobileNetV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilevit": {
+ "*": {
+ "repo": "apple/mobilevit-small",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTModel"
+ }
+ },
+ "tasks": [
+ "MobileViTForImageClassification",
+ "MobileViTForSemanticSegmentation",
+ "MobileViTModel",
+ "MobileViTPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilevitv2-1": {
+ "*": {
+ "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTV2Model"
+ }
+ },
+ "tasks": [
+ "MobileViTV2ForImageClassification",
+ "MobileViTV2ForSemanticSegmentation",
+ "MobileViTV2Model",
+ "MobileViTV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.modernbert": {
+ "*": {
+ "repo": "answerdotai/ModernBERT-base",
+ "pkg": {
+ "0": {
+ "transformers": "ModernBertModel"
+ }
+ },
+ "tasks": [
+ "ModernBertModel",
+ "ModernBertPreTrainedModel",
+ "ModernBertForMaskedLM",
+ "ModernBertForSequenceClassification",
+ "ModernBertForTokenClassification",
+ "ModernBertForQuestionAnswering",
+ "ModernBertForMultipleChoice"
+ ]
+ }
+ },
+ "info.aet.test-dec": {
+ "*": {
+ "repo": "blab-jhu/test-32m-dec",
+ "pkg": {
+ "0": {
+ "transformers": "ModernBertDecoderModel"
+ }
+ },
+ "tasks": [
+ "ModernBertDecoderModel",
+ "ModernBertDecoderPreTrainedModel",
+ "ModernBertDecoderForCausalLM",
+ "ModernBertDecoderForSequenceClassification"
+ ]
+ }
+ },
+ "info.stst.moonshine": {
+ "*": {
+ "repo": "UsefulSensors/moonshine-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "MoonshineModel"
+ }
+ },
+ "tasks": [
+ "MoonshineModel",
+ "MoonshinePreTrainedModel",
+ "MoonshineForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.hf-moshiko": {
+ "*": {
+ "repo": "kmhf/hf-moshiko",
+ "pkg": {
+ "0": {
+ "transformers": "MoshiModel"
+ }
+ },
+ "tasks": [
+ "MoshiForCausalLM",
+ "MoshiForConditionalGeneration",
+ "MoshiModel",
+ "MoshiPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mpnet": {
+ "*": {
+ "repo": "microsoft/mpnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "MPNetModel"
+ }
+ },
+ "tasks": [
+ "MPNetForMaskedLM",
+ "MPNetForMultipleChoice",
+ "MPNetForQuestionAnswering",
+ "MPNetForSequenceClassification",
+ "MPNetForTokenClassification",
+ "MPNetLayer",
+ "MPNetModel",
+ "MPNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mpt": {
+ "*": {
+ "repo": "mosaicml/mpt-7b",
+ "pkg": {
+ "0": {
+ "transformers": "MptModel"
+ }
+ },
+ "tasks": [
+ "MptForCausalLM",
+ "MptModel",
+ "MptPreTrainedModel",
+ "MptForSequenceClassification",
+ "MptForTokenClassification",
+ "MptForQuestionAnswering"
+ ]
+ }
+ },
+ "info.art.mra-512-4": {
+ "*": {
+ "repo": "uw-madison/mra-base-512-4",
+ "pkg": {
+ "0": {
+ "transformers": "MraModel"
+ }
+ },
+ "tasks": [
+ "MraForMaskedLM",
+ "MraForMultipleChoice",
+ "MraForQuestionAnswering",
+ "MraForSequenceClassification",
+ "MraForTokenClassification",
+ "MraLayer",
+ "MraModel",
+ "MraPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mt5": {
+ "*": {
+ "repo": "google/mt5-small",
+ "pkg": {
+ "0": {
+ "transformers": "MT5Model"
+ }
+ },
+ "identifiers": [
+ [
+ 250112,
+ 2048
+ ],
+ "text_encoders.mt5xl.transformer.shared.weight"
+ ],
+ "file_256": [
+ "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
+ "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
+ ],
+ "layer_b3": [
+ "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51"
+ ],
+ "layer_256": [
+ "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
+ "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
+ ],
+ "tasks": [
+ "MT5EncoderModel",
+ "MT5ForConditionalGeneration",
+ "MT5ForQuestionAnswering",
+ "MT5ForSequenceClassification",
+ "MT5ForTokenClassification",
+ "MT5Model",
+ "MT5PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.musicgen": {
+ "*": {
+ "repo": "facebook/musicgen-small",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenModel"
+ }
+ },
+ "tasks": [
+ "MusicgenForConditionalGeneration",
+ "MusicgenForCausalLM",
+ "MusicgenModel",
+ "MusicgenPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.musicgen-melody": {
+ "*": {
+ "repo": "facebook/musicgen-melody",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenMelodyModel"
+ }
+ },
+ "tasks": [
+ "MusicgenMelodyForConditionalGeneration",
+ "MusicgenMelodyForCausalLM",
+ "MusicgenMelodyModel",
+ "MusicgenMelodyPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mvp": {
+ "*": {
+ "repo": "RUCAIBox/mvp",
+ "pkg": {
+ "0": {
+ "transformers": "MvpModel"
+ }
+ },
+ "tasks": [
+ "MvpForCausalLM",
+ "MvpForConditionalGeneration",
+ "MvpForQuestionAnswering",
+ "MvpForSequenceClassification",
+ "MvpModel",
+ "MvpPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.nanochat-d32": {
+ "*": {
+ "repo": "karpathy/nanochat-d32",
+ "pkg": {
+ "0": {
+ "transformers": "NanoChatModel"
+ }
+ },
+ "tasks": [
+ "NanoChatPreTrainedModel",
+ "NanoChatModel",
+ "NanoChatForCausalLM"
+ ]
+ }
+ },
+ "info.stst.nemotron-3-hf": {
+ "*": {
+ "repo": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
+ "pkg": {
+ "0": {
+ "transformers": "NemotronModel"
+ }
+ },
+ "tasks": [
+ "NemotronForQuestionAnswering",
+ "NemotronForCausalLM",
+ "NemotronModel",
+ "NemotronPreTrainedModel",
+ "NemotronForSequenceClassification",
+ "NemotronForTokenClassification"
+ ]
+ }
+ },
+ "info.moe.nllb-moe": {
+ "*": {
+ "repo": "facebook/nllb-moe-54b",
+ "pkg": {
+ "0": {
+ "transformers": "NllbMoeModel"
+ }
+ },
+ "tasks": [
+ "NllbMoeForConditionalGeneration",
+ "NllbMoeModel",
+ "NllbMoePreTrainedModel",
+ "NllbMoeTop2Router",
+ "NllbMoeSparseMLP"
+ ]
+ }
+ },
+ "info.art.nystromformer-512": {
+ "*": {
+ "repo": "uw-madison/nystromformer-512",
+ "pkg": {
+ "0": {
+ "transformers": "NystromformerModel"
+ }
+ },
+ "tasks": [
+ "NystromformerForMaskedLM",
+ "NystromformerForMultipleChoice",
+ "NystromformerForQuestionAnswering",
+ "NystromformerForSequenceClassification",
+ "NystromformerForTokenClassification",
+ "NystromformerLayer",
+ "NystromformerModel",
+ "NystromformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.olmo-hf": {
+ "*": {
+ "repo": "allenai/OLMo-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "OlmoModel"
+ }
+ },
+ "tasks": [
+ "OlmoForCausalLM",
+ "OlmoModel",
+ "OlmoPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.olmo2-1124-hf": {
+ "*": {
+ "repo": "allenai/Olmo-2-1124-7B",
+ "pkg": {
+ "0": {
+ "transformers": "Olmo2Model"
+ }
+ },
+ "tasks": [
+ "Olmo2ForCausalLM",
+ "Olmo2Model",
+ "Olmo2PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.olmo-3-0725": {
+ "*": {
+ "repo": "allenai/OLMo-3-0725-1B",
+ "pkg": {
+ "0": {
+ "transformers": "Olmo3Model"
+ }
+ },
+ "tasks": [
+ "Olmo3ForCausalLM",
+ "Olmo3Model",
+ "Olmo3PreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.olmoe-0924": {
+ "*": {
+ "repo": "allenai/OLMoE-1B-7B-0924",
+ "pkg": {
+ "0": {
+ "transformers": "OlmoeModel"
+ }
+ },
+ "tasks": [
+ "OlmoeForCausalLM",
+ "OlmoeModel",
+ "OlmoePreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.omdet-turbo-swin-hf": {
+ "*": {
+ "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "pkg": {
+ "0": {
+ "transformers": "OmDetTurboForObjectDetection"
+ }
+ },
+ "tasks": [
+ "OmDetTurboForObjectDetection",
+ "OmDetTurboPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.oneformer-ade-swin": {
+ "*": {
+ "repo": "shi-labs/oneformer_ade20k_swin_tiny",
+ "pkg": {
+ "0": {
+ "transformers": "OneFormerModel"
+ }
+ },
+ "tasks": [
+ "OneFormerForUniversalSegmentation",
+ "OneFormerModel",
+ "OneFormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.openai-gpt": {
+ "*": {
+ "repo": "openai-community/openai-gpt",
+ "pkg": {
+ "0": {
+ "transformers": "OpenAIGPTModel"
+ }
+ },
+ "tasks": [
+ "OpenAIGPTDoubleHeadsModel",
+ "OpenAIGPTForSequenceClassification",
+ "OpenAIGPTLMHeadModel",
+ "OpenAIGPTModel",
+ "OpenAIGPTPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.opt": {
+ "*": {
+ "repo": "facebook/opt-350m",
+ "pkg": {
+ "0": {
+ "transformers": "OPTModel"
+ }
+ },
+ "tasks": [
+ "OPTForCausalLM",
+ "OPTModel",
+ "OPTPreTrainedModel",
+ "OPTForSequenceClassification",
+ "OPTForQuestionAnswering"
+ ]
+ }
+ },
+ "info.vit.ovis2-hf": {
+ "*": {
+ "repo": "thisisiron/Ovis2-1B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Ovis2Model"
+ }
+ },
+ "tasks": [
+ "Ovis2PreTrainedModel",
+ "Ovis2Model",
+ "Ovis2ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.vit.owlv2-patch16": {
+ "*": {
+ "repo": "google/owlv2-base-patch16",
+ "pkg": {
+ "0": {
+ "transformers": "Owlv2Model"
+ }
+ },
+ "tasks": [
+ "Owlv2Model",
+ "Owlv2PreTrainedModel",
+ "Owlv2TextModel",
+ "Owlv2VisionModel",
+ "Owlv2ForObjectDetection"
+ ]
+ }
+ },
+ "info.vit.owlvit-patch32": {
+ "*": {
+ "repo": "google/owlvit-base-patch32",
+ "pkg": {
+ "0": {
+ "transformers": "OwlViTModel"
+ }
+ },
+ "tasks": [
+ "OwlViTModel",
+ "OwlViTPreTrainedModel",
+ "OwlViTTextModel",
+ "OwlViTVisionModel",
+ "OwlViTForObjectDetection"
+ ]
+ }
+ },
+ "info.vit.paligemma": {
+ "*": {
+ "repo": "google/paligemma2-3b-mix-224",
+ "pkg": {
+ "0": {
+ "transformers": "PaliGemmaModel"
+ }
+ },
+ "tasks": [
+ "PaliGemmaForConditionalGeneration",
+ "PaliGemmaPreTrainedModel",
+ "PaliGemmaModel"
+ ]
+ }
+ },
+ "info.aet.parakeet-ctc-b": {
+ "*": {
+ "repo": "nvidia/parakeet-ctc-1.1b",
+ "pkg": {
+ "0": {
+ "transformers": "ParakeetForCTC"
+ }
+ },
+ "tasks": [
+ "ParakeetForCTC",
+ "ParakeetEncoder",
+ "ParakeetPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.parakeet-ctc-b": {
+ "*": {
+ "repo": "nvidia/parakeet-ctc-1.1b",
+ "pkg": {
+ "0": {
+ "transformers": "ParakeetEncoder"
+ }
+ },
+ "tasks": [
+ "ParakeetForCTC",
+ "ParakeetEncoder",
+ "ParakeetPreTrainedModel"
+ ]
+ }
+ },
+ "info.mlp.patchtsmixer-etth1-pretrain": {
+ "*": {
+ "repo": "ibm/patchtsmixer-etth1-pretrain",
+ "pkg": {
+ "0": {
+ "transformers": "PatchTSMixerModel"
+ }
+ },
+ "tasks": [
+ "PatchTSMixerPreTrainedModel",
+ "PatchTSMixerModel",
+ "PatchTSMixerForPretraining",
+ "PatchTSMixerForPrediction",
+ "PatchTSMixerForTimeSeriesClassification",
+ "PatchTSMixerForRegression"
+ ]
+ }
+ },
+ "info.art.patchtst": {
+ "*": {
+ "repo": "ibm/patchtst",
+ "pkg": {
+ "0": {
+ "transformers": "PatchTSTModel"
+ }
+ },
+ "tasks": [
+ "PatchTSTModel",
+ "PatchTSTPreTrainedModel",
+ "PatchTSTForPrediction",
+ "PatchTSTForPretraining",
+ "PatchTSTForRegression",
+ "PatchTSTForClassification"
+ ]
+ }
+ },
+ "info.stst.pe-av": {
+ "*": {
+ "repo": "facebook/pe-av-large",
+ "pkg": {
+ "0": {
+ "transformers": "PeAudioModel"
+ }
+ },
+ "tasks": [
+ "PeAudioFrameLevelModel",
+ "PeAudioModel",
+ "PeAudioEncoder"
+ ]
+ }
+ },
+ "info.aet.pe-av": {
+ "*": {
+ "repo": "facebook/pe-av-large",
+ "pkg": {
+ "0": {
+ "transformers": "PeAudioVideoModel"
+ }
+ },
+ "tasks": [
+ "PeAudioVideoModel",
+ "PeAudioVideoEncoder"
+ ]
+ }
+ },
+ "info.vit.pe-av": {
+ "*": {
+ "repo": "facebook/pe-av-large",
+ "pkg": {
+ "0": {
+ "transformers": "PeVideoEncoder"
+ }
+ },
+ "tasks": [
+ "PeVideoEncoder",
+ "PeVideoModel"
+ ]
+ }
+ },
+ "info.stst.pegasus": {
+ "*": {
+ "repo": "google/pegasus-large",
+ "pkg": {
+ "0": {
+ "transformers": "PegasusModel"
+ }
+ },
+ "tasks": [
+ "PegasusForCausalLM",
+ "PegasusForConditionalGeneration",
+ "PegasusModel",
+ "PegasusPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.pegasus-x": {
+ "*": {
+ "repo": "google/pegasus-x-large",
+ "pkg": {
+ "0": {
+ "transformers": "PegasusXModel"
+ }
+ },
+ "tasks": [
+ "PegasusXForConditionalGeneration",
+ "PegasusXModel",
+ "PegasusXPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.language-perceiver": {
+ "*": {
+ "repo": "deepmind/language-perceiver",
+ "pkg": {
+ "0": {
+ "transformers": "PerceiverModel"
+ }
+ },
+ "tasks": [
+ "PerceiverForImageClassificationConvProcessing",
+ "PerceiverForImageClassificationFourier",
+ "PerceiverForImageClassificationLearned",
+ "PerceiverForMaskedLM",
+ "PerceiverForMultimodalAutoencoding",
+ "PerceiverForOpticalFlow",
+ "PerceiverForSequenceClassification",
+ "PerceiverLayer",
+ "PerceiverModel",
+ "PerceiverPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.perception-lm": {
+ "*": {
+ "repo": "facebook/Perception-LM-1B",
+ "pkg": {
+ "0": {
+ "transformers": "PerceptionLMModel"
+ }
+ },
+ "tasks": [
+ "PerceptionLMForConditionalGeneration",
+ "PerceptionLMPreTrainedModel",
+ "PerceptionLMModel"
+ ]
+ }
+ },
+ "info.stst.persimmon": {
+ "*": {
+ "repo": "adept/persimmon-8b-base",
+ "pkg": {
+ "0": {
+ "transformers": "PersimmonModel"
+ }
+ },
+ "tasks": [
+ "PersimmonForCausalLM",
+ "PersimmonModel",
+ "PersimmonPreTrainedModel",
+ "PersimmonForSequenceClassification",
+ "PersimmonForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.phi-1": {
+ "*": {
+ "repo": "microsoft/phi-1",
+ "pkg": {
+ "0": {
+ "transformers": "PhiModel"
+ }
+ },
+ "tasks": [
+ "PhiPreTrainedModel",
+ "PhiModel",
+ "PhiForCausalLM",
+ "PhiForSequenceClassification",
+ "PhiForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.phi-3": {
+ "*": {
+ "repo": "microsoft/Phi-3-mini-4k-instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Phi3Model"
+ }
+ },
+ "tasks": [
+ "Phi3PreTrainedModel",
+ "Phi3Model",
+ "Phi3ForCausalLM",
+ "Phi3ForSequenceClassification",
+ "Phi3ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.phi-4": {
+ "*": {
+ "repo": "microsoft/Phi-4-multimodal-instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Phi4MultimodalModel"
+ }
+ },
+ "file_256": [
+ "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
+ ],
+ "layer_b3": [
+ "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
+ ],
+ "layer_256": [
+ "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
+ ],
+ "tasks": [
+ "Phi4MultimodalAudioPreTrainedModel",
+ "Phi4MultimodalAudioModel",
+ "Phi4MultimodalVisionPreTrainedModel",
+ "Phi4MultimodalVisionModel",
+ "Phi4MultimodalPreTrainedModel",
+ "Phi4MultimodalModel",
+ "Phi4MultimodalForCausalLM"
+ ]
+ }
+ },
+ "info.moe.phi-3-moe": {
+ "*": {
+ "repo": "microsoft/Phi-3.5-MoE-instruct",
+ "pkg": {
+ "0": {
+ "transformers": "PhimoeModel"
+ }
+ },
+ "tasks": [
+ "PhimoePreTrainedModel",
+ "PhimoeModel",
+ "PhimoeForCausalLM",
+ "PhimoeForSequenceClassification"
+ ]
+ }
+ },
+ "info.vit.pixio-huge": {
+ "*": {
+ "repo": "facebook/pixio-huge",
+ "pkg": {
+ "0": {
+ "transformers": "PixioModel"
+ }
+ },
+ "tasks": [
+ "PixioModel",
+ "PixioPreTrainedModel",
+ "PixioBackbone"
+ ]
+ }
+ },
+ "info.vit.pixtral": {
+ "*": {
+ "repo": "mistralai/Pixtral-12B-Base-2409",
+ "pkg": {
+ "0": {
+ "transformers": "PixtralVisionModel"
+ }
+ },
+ "tasks": [
+ "PixtralVisionModel",
+ "PixtralPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.plbart": {
+ "*": {
+ "repo": "uclanlp/plbart-base",
+ "pkg": {
+ "0": {
+ "transformers": "PLBartModel"
+ }
+ },
+ "tasks": [
+ "PLBartForCausalLM",
+ "PLBartForConditionalGeneration",
+ "PLBartForSequenceClassification",
+ "PLBartModel",
+ "PLBartPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.poolformer-s12": {
+ "*": {
+ "repo": "sail/poolformer_s12",
+ "pkg": {
+ "0": {
+ "transformers": "PoolFormerModel"
+ }
+ },
+ "tasks": [
+ "PoolFormerForImageClassification",
+ "PoolFormerModel",
+ "PoolFormerPreTrainedModel"
+ ]
+ }
+ },
+    "info.stst.prophetnet-uncased": {
+ "*": {
+ "repo": "microsoft/prophetnet-large-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "ProphetNetModel"
+ }
+ },
+ "tasks": [
+ "ProphetNetDecoder",
+ "ProphetNetEncoder",
+ "ProphetNetForCausalLM",
+ "ProphetNetForConditionalGeneration",
+ "ProphetNetModel",
+ "ProphetNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.pvt-224": {
+ "*": {
+ "repo": "Xrenya/pvt-tiny-224",
+ "pkg": {
+ "0": {
+ "transformers": "PvtModel"
+ }
+ },
+ "tasks": [
+ "PvtForImageClassification",
+ "PvtModel",
+ "PvtPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.pvt-v2-b0": {
+ "*": {
+ "repo": "OpenGVLab/pvt_v2_b0",
+ "pkg": {
+ "0": {
+ "transformers": "PvtV2Model"
+ }
+ },
+ "tasks": [
+ "PvtV2ForImageClassification",
+ "PvtV2Model",
+ "PvtV2PreTrainedModel",
+ "PvtV2Backbone"
+ ]
+ }
+ },
+ "info.stst.qwen2": {
+ "*": {
+ "repo": "Qwen/Qwen2-7B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2Model"
+ }
+ },
+ "tasks": [
+ "Qwen2PreTrainedModel",
+ "Qwen2Model",
+ "Qwen2ForCausalLM",
+ "Qwen2RMSNorm",
+ "Qwen2ForSequenceClassification",
+ "Qwen2ForTokenClassification",
+ "Qwen2ForQuestionAnswering"
+ ]
+ }
+ },
+ "info.vit.qwen2-vl": {
+ "*": {
+ "repo": "Qwen/Qwen2-VL-7B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2_5_VLModel"
+ }
+ },
+ "tasks": [
+ "Qwen2_5_VLForConditionalGeneration",
+ "Qwen2_5_VLModel",
+ "Qwen2_5_VLPreTrainedModel",
+ "Qwen2_5_VLTextModel"
+ ]
+ }
+ },
+ "info.stst.qwen2-vl": {
+ "*": {
+ "repo": "Qwen/Qwen2-VL-7B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2_5_VLTextModel"
+ }
+ },
+ "file_256": [
+ "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
+ "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
+ "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
+ "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
+ ],
+ "layer_b3": [
+ "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
+ "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
+ "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
+ "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
+ ],
+ "layer_256": [
+ "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
+ "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
+ "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
+ "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
+ ],
+ "tasks": [
+ "Qwen2_5_VLForConditionalGeneration",
+ "Qwen2_5_VLModel",
+ "Qwen2_5_VLPreTrainedModel",
+ "Qwen2_5_VLTextModel"
+ ]
+ }
+ },
+ "info.aet.qwen2-audio": {
+ "*": {
+ "repo": "Qwen/Qwen2-Audio-7B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2AudioEncoder"
+ }
+ },
+ "tasks": [
+ "Qwen2AudioForConditionalGeneration",
+ "Qwen2AudioPreTrainedModel",
+ "Qwen2AudioEncoder"
+ ]
+ }
+ },
+ "info.moe.qwen15-moe-a": {
+ "*": {
+ "repo": "Qwen/Qwen1.5-MoE-A2.7B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen2MoeModel"
+ }
+ },
+ "tasks": [
+ "Qwen2MoeForCausalLM",
+ "Qwen2MoeForQuestionAnswering",
+ "Qwen2MoeModel",
+ "Qwen2MoePreTrainedModel",
+ "Qwen2MoeForSequenceClassification",
+ "Qwen2MoeForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.qwen3": {
+ "*": {
+ "repo": "Qwen/Qwen3-8B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3Model"
+ }
+ },
+ "tasks": [
+ "Qwen3ForCausalLM",
+ "Qwen3ForQuestionAnswering",
+ "Qwen3PreTrainedModel",
+ "Qwen3Model",
+ "Qwen3ForSequenceClassification",
+ "Qwen3ForTokenClassification"
+ ]
+ }
+ },
+ "info.moe.qwen3-a": {
+ "*": {
+ "repo": "Qwen/Qwen3-30B-A3B",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3MoeModel"
+ }
+ },
+ "file_256": [
+ "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
+ ],
+ "layer_b3": [
+ "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
+ ],
+ "layer_256": [
+ "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
+ ],
+ "tasks": [
+ "Qwen3MoeForCausalLM",
+ "Qwen3MoeForQuestionAnswering",
+ "Qwen3MoeModel",
+ "Qwen3MoePreTrainedModel",
+ "Qwen3MoeForSequenceClassification",
+ "Qwen3MoeForTokenClassification"
+ ]
+ }
+ },
+ "info.moe.qwen3-next-a": {
+ "*": {
+ "repo": "Qwen/Qwen3-Next-80B-A3B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3NextModel"
+ }
+ },
+ "tasks": [
+ "Qwen3NextForCausalLM",
+ "Qwen3NextForQuestionAnswering",
+ "Qwen3NextModel",
+ "Qwen3NextPreTrainedModel",
+ "Qwen3NextForSequenceClassification",
+ "Qwen3NextForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.qwen3-vl": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-4B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLModel"
+ }
+ },
+ "tasks": [
+ "Qwen3VLVisionModel",
+ "Qwen3VLForConditionalGeneration",
+ "Qwen3VLModel",
+ "Qwen3VLPreTrainedModel",
+ "Qwen3VLTextModel"
+ ]
+ }
+ },
+ "info.vit.qwen3-vl-a": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLMoeModel"
+ }
+ },
+ "tasks": [
+ "Qwen3VLMoeVisionModel",
+ "Qwen3VLMoeForConditionalGeneration",
+ "Qwen3VLMoeModel",
+ "Qwen3VLMoePreTrainedModel",
+ "Qwen3VLMoeTextModel"
+ ]
+ }
+ },
+ "info.moe.qwen3-vl-a": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLMoeTextModel"
+ }
+ },
+ "tasks": [
+ "Qwen3VLMoeVisionModel",
+ "Qwen3VLMoeForConditionalGeneration",
+ "Qwen3VLMoeModel",
+ "Qwen3VLMoePreTrainedModel",
+ "Qwen3VLMoeTextModel"
+ ]
+ }
+ },
+ "info.stst.qwen3-vl": {
+ "*": {
+ "repo": "Qwen/Qwen3-VL-4B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "Qwen3VLTextModel"
+ }
+ },
+ "tasks": [
+ "Qwen3VLVisionModel",
+ "Qwen3VLForConditionalGeneration",
+ "Qwen3VLModel",
+ "Qwen3VLPreTrainedModel",
+ "Qwen3VLTextModel"
+ ]
+ }
+ },
+ "info.rnn.recurrentgemma": {
+ "*": {
+ "repo": "google/recurrentgemma-2b",
+ "pkg": {
+ "0": {
+ "transformers": "RecurrentGemmaModel"
+ }
+ },
+ "tasks": [
+ "RecurrentGemmaForCausalLM",
+ "RecurrentGemmaModel",
+ "RecurrentGemmaPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.reformer-crime-and-punishment": {
+ "*": {
+ "repo": "google/reformer-crime-and-punishment",
+ "pkg": {
+ "0": {
+ "transformers": "ReformerModel"
+ }
+ },
+ "tasks": [
+ "ReformerAttention",
+ "ReformerForMaskedLM",
+ "ReformerForQuestionAnswering",
+ "ReformerForSequenceClassification",
+ "ReformerLayer",
+ "ReformerModel",
+ "ReformerModelWithLMHead",
+ "ReformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.regnet-y-040": {
+ "*": {
+ "repo": "facebook/regnet-y-040",
+ "pkg": {
+ "0": {
+ "transformers": "RegNetModel"
+ }
+ },
+ "tasks": [
+ "RegNetForImageClassification",
+ "RegNetModel",
+ "RegNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.rembert": {
+ "*": {
+ "repo": "google/rembert",
+ "pkg": {
+ "0": {
+ "transformers": "RemBertModel"
+ }
+ },
+ "tasks": [
+ "RemBertForCausalLM",
+ "RemBertForMaskedLM",
+ "RemBertForMultipleChoice",
+ "RemBertForQuestionAnswering",
+ "RemBertForSequenceClassification",
+ "RemBertForTokenClassification",
+ "RemBertLayer",
+ "RemBertModel",
+ "RemBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.resnet-50": {
+ "*": {
+ "repo": "microsoft/resnet-50",
+ "pkg": {
+ "0": {
+ "transformers": "ResNetModel"
+ }
+ },
+ "tasks": [
+ "ResNetForImageClassification",
+ "ResNetModel",
+ "ResNetPreTrainedModel",
+ "ResNetBackbone"
+ ]
+ }
+ },
+ "info.art.roberta": {
+ "*": {
+ "repo": "FacebookAI/roberta-base",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaModel"
+ }
+ },
+ "tasks": [
+ "RobertaForCausalLM",
+ "RobertaForMaskedLM",
+ "RobertaForMultipleChoice",
+ "RobertaForQuestionAnswering",
+ "RobertaForSequenceClassification",
+ "RobertaForTokenClassification",
+ "RobertaModel",
+ "RobertaPreTrainedModel"
+ ]
+ }
+ },
+    "info.art.efficient-mlm-m0-40": {
+ "*": {
+ "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "pkg": {
+ "0": {
+ "transformers": "RobertaPreLayerNormModel"
+ }
+ },
+ "tasks": [
+ "RobertaPreLayerNormForCausalLM",
+ "RobertaPreLayerNormForMaskedLM",
+ "RobertaPreLayerNormForMultipleChoice",
+ "RobertaPreLayerNormForQuestionAnswering",
+ "RobertaPreLayerNormForSequenceClassification",
+ "RobertaPreLayerNormForTokenClassification",
+ "RobertaPreLayerNormModel",
+ "RobertaPreLayerNormPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.roc-bert-zh": {
+ "*": {
+ "repo": "weiweishi/roc-bert-base-zh",
+ "pkg": {
+ "0": {
+ "transformers": "RoCBertModel"
+ }
+ },
+ "tasks": [
+ "RoCBertForCausalLM",
+ "RoCBertForMaskedLM",
+ "RoCBertForMultipleChoice",
+ "RoCBertForPreTraining",
+ "RoCBertForQuestionAnswering",
+ "RoCBertForSequenceClassification",
+ "RoCBertForTokenClassification",
+ "RoCBertLayer",
+ "RoCBertModel",
+ "RoCBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.roformer-chinese": {
+ "*": {
+ "repo": "junnyu/roformer_chinese_base",
+ "pkg": {
+ "0": {
+ "transformers": "RoFormerModel"
+ }
+ },
+ "tasks": [
+ "RoFormerForCausalLM",
+ "RoFormerForMaskedLM",
+ "RoFormerForMultipleChoice",
+ "RoFormerForQuestionAnswering",
+ "RoFormerForSequenceClassification",
+ "RoFormerForTokenClassification",
+ "RoFormerLayer",
+ "RoFormerModel",
+ "RoFormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.rtdetr-r50vd": {
+ "*": {
+ "repo": "PekingU/rtdetr_r50vd",
+ "pkg": {
+ "0": {
+ "transformers": "RTDetrModel"
+ }
+ },
+ "tasks": [
+ "RTDetrForObjectDetection",
+ "RTDetrModel",
+ "RTDetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.rtdetr-r18vd": {
+ "*": {
+ "repo": "PekingU/rtdetr_r18vd",
+ "pkg": {
+ "0": {
+ "transformers": "RTDetrV2Model"
+ }
+ },
+ "tasks": [
+ "RTDetrV2Model",
+ "RTDetrV2PreTrainedModel",
+ "RTDetrV2ForObjectDetection"
+ ]
+ }
+ },
+ "info.rnn.rwkv-4-pile": {
+ "*": {
+ "repo": "RWKV/rwkv-4-169m-pile",
+ "pkg": {
+ "0": {
+ "transformers": "RwkvModel"
+ }
+ },
+ "tasks": [
+ "RwkvForCausalLM",
+ "RwkvModel",
+ "RwkvPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.sam-vit-huge": {
+ "*": {
+ "repo": "facebook/sam-vit-huge",
+ "pkg": {
+ "0": {
+ "transformers": "SamModel"
+ }
+ },
+ "tasks": [
+ "SamVisionModel",
+ "SamModel",
+ "SamPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.sam2-hiera": {
+ "*": {
+ "repo": "facebook/sam2.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "Sam2Model"
+ }
+ },
+ "tasks": [
+ "Sam2Model",
+ "Sam2VisionModel",
+ "Sam2PreTrainedModel",
+ "Sam2HieraDetModel"
+ ]
+ }
+ },
+ "info.vit.sam3": {
+ "*": {
+ "repo": "facebook/sam3",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3Model"
+ }
+ },
+ "tasks": [
+ "Sam3Model",
+ "Sam3VisionModel",
+ "Sam3ViTModel",
+ "Sam3PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.sam3-tracker1-hiera": {
+ "*": {
+ "repo": "facebook/sam3_tracker.1-hiera-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3TrackerModel"
+ }
+ },
+ "tasks": [
+ "Sam3TrackerModel",
+ "Sam3TrackerPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.sam3": {
+ "*": {
+ "repo": "facebook/sam3",
+ "pkg": {
+ "0": {
+ "transformers": "Sam3VideoModel"
+ }
+ },
+ "tasks": [
+ "Sam3VideoModel",
+ "Sam3VideoPreTrainedModel",
+ "Sam3VideoInferenceSession",
+ "Sam3VideoSegmentationOutput"
+ ]
+ }
+ },
+ "info.vit.sam-hq-vit-h": {
+ "*": {
+ "repo": "sushmanth/sam_hq_vit_h",
+ "pkg": {
+ "0": {
+ "transformers": "SamHQModel"
+ }
+ },
+ "tasks": [
+ "SamHQModel",
+ "SamHQPreTrainedModel",
+ "SamHQVisionModel"
+ ]
+ }
+ },
+ "info.vit.sam-hq-vit-huge": {
+ "*": {
+ "repo": "syscv-community/sam-hq-vit-huge",
+ "pkg": {
+ "0": {
+ "transformers": "SamHQVisionModel"
+ }
+ },
+ "tasks": [
+ "SamHQModel",
+ "SamHQPreTrainedModel",
+ "SamHQVisionModel"
+ ]
+ }
+ },
+ "info.aet.hf-seamless-m4t": {
+ "*": {
+ "repo": "facebook/hf-seamless-m4t-medium",
+ "pkg": {
+ "0": {
+ "transformers": "SeamlessM4TModel"
+ }
+ },
+ "tasks": [
+ "SeamlessM4TForTextToSpeech",
+ "SeamlessM4TForSpeechToSpeech",
+ "SeamlessM4TForTextToText",
+ "SeamlessM4TForSpeechToText",
+ "SeamlessM4TModel",
+ "SeamlessM4TPreTrainedModel",
+ "SeamlessM4TCodeHifiGan",
+ "SeamlessM4THifiGan",
+ "SeamlessM4TTextToUnitForConditionalGeneration",
+ "SeamlessM4TTextToUnitModel"
+ ]
+ }
+ },
+ "info.stst.seamless-m4t-v2": {
+ "*": {
+ "repo": "facebook/seamless-m4t-v2-large",
+ "pkg": {
+ "0": {
+ "transformers": "SeamlessM4Tv2Model"
+ }
+ },
+ "tasks": [
+ "SeamlessM4Tv2ForTextToSpeech",
+ "SeamlessM4Tv2ForSpeechToSpeech",
+ "SeamlessM4Tv2ForTextToText",
+ "SeamlessM4Tv2ForSpeechToText",
+ "SeamlessM4Tv2Model",
+ "SeamlessM4Tv2PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.seedoss": {
+ "*": {
+ "repo": "ByteDance-Seed/SeedOss-36B",
+ "pkg": {
+ "0": {
+ "transformers": "SeedOssModel"
+ }
+ },
+ "tasks": [
+ "SeedOssForCausalLM",
+ "SeedOssForQuestionAnswering",
+ "SeedOssPreTrainedModel",
+ "SeedOssModel",
+ "SeedOssForSequenceClassification",
+ "SeedOssForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.segformer-b0-finetuned-ade-512-512": {
+ "*": {
+ "repo": "nvidia/segformer-b0-finetuned-ade-512-512",
+ "pkg": {
+ "0": {
+ "transformers": "SegformerModel"
+ }
+ },
+ "tasks": [
+ "SegformerDecodeHead",
+ "SegformerForImageClassification",
+ "SegformerForSemanticSegmentation",
+ "SegformerLayer",
+ "SegformerModel",
+ "SegformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.seggpt-vit": {
+ "*": {
+ "repo": "BAAI/seggpt-vit-large",
+ "pkg": {
+ "0": {
+ "transformers": "SegGptModel"
+ }
+ },
+ "tasks": [
+ "SegGptModel",
+ "SegGptPreTrainedModel",
+ "SegGptForImageSegmentation"
+ ]
+ }
+ },
+ "info.aet.sew": {
+ "*": {
+ "repo": "asapp/sew-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWModel"
+ }
+ },
+ "tasks": [
+ "SEWForCTC",
+ "SEWForSequenceClassification",
+ "SEWModel",
+ "SEWPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.sew-d": {
+ "*": {
+ "repo": "asapp/sew-d-tiny-100k",
+ "pkg": {
+ "0": {
+ "transformers": "SEWDModel"
+ }
+ },
+ "tasks": [
+ "SEWDForCTC",
+ "SEWDForSequenceClassification",
+ "SEWDModel",
+ "SEWDPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.siglip2-patch16-224": {
+ "*": {
+ "repo": "google/siglip2-base-patch16-224",
+ "pkg": {
+ "0": {
+ "transformers": "Siglip2Model"
+ }
+ },
+ "tasks": [
+ "Siglip2Model",
+ "Siglip2PreTrainedModel",
+ "Siglip2TextModel",
+ "Siglip2VisionModel",
+ "Siglip2ForImageClassification"
+ ]
+ }
+ },
+ "info.vit.siglip2-patch16-naflex": {
+ "*": {
+ "repo": "google/siglip2-base-patch16-naflex",
+ "pkg": {
+ "0": {
+ "transformers": "Siglip2VisionModel"
+ }
+ },
+ "tasks": [
+ "Siglip2Model",
+ "Siglip2PreTrainedModel",
+ "Siglip2TextModel",
+ "Siglip2VisionModel",
+ "Siglip2ForImageClassification"
+ ]
+ }
+ },
+ "info.stst.smollm3": {
+ "*": {
+ "repo": "HuggingFaceTB/SmolLM3-3B",
+ "pkg": {
+ "0": {
+ "transformers": "SmolLM3Model"
+ }
+ },
+ "tasks": [
+ "SmolLM3PreTrainedModel",
+ "SmolLM3Model",
+ "SmolLM3ForCausalLM",
+ "SmolLM3ForSequenceClassification",
+ "SmolLM3ForTokenClassification",
+ "SmolLM3ForQuestionAnswering"
+ ]
+ }
+ },
+ "info.vit.smolvlm": {
+ "*": {
+ "repo": "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "SmolVLMModel"
+ }
+ },
+ "tasks": [
+ "SmolVLMForConditionalGeneration",
+ "SmolVLMPreTrainedModel",
+ "SmolVLMModel",
+ "SmolVLMVisionTransformer"
+ ]
+ }
+ },
+ "info.vit.siglip-so-patch14-384": {
+ "*": {
+ "repo": "google/siglip-so400m-patch14-384",
+ "pkg": {
+ "0": {
+ "transformers": "SmolVLMVisionTransformer"
+ }
+ },
+ "tasks": [
+ "SmolVLMForConditionalGeneration",
+ "SmolVLMPreTrainedModel",
+ "SmolVLMModel",
+ "SmolVLMVisionTransformer"
+ ]
+ }
+ },
+ "info.aet.s2t-librispeech-asr": {
+ "*": {
+ "repo": "facebook/s2t-small-librispeech-asr",
+ "pkg": {
+ "0": {
+ "transformers": "Speech2TextModel"
+ }
+ },
+ "tasks": [
+ "Speech2TextForConditionalGeneration",
+ "Speech2TextModel",
+ "Speech2TextPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.speecht5-asr": {
+ "*": {
+ "repo": "microsoft/speecht5_asr",
+ "pkg": {
+ "0": {
+ "transformers": "SpeechT5Model"
+ }
+ },
+ "tasks": [
+ "SpeechT5ForSpeechToText",
+ "SpeechT5ForSpeechToSpeech",
+ "SpeechT5ForTextToSpeech",
+ "SpeechT5Model",
+ "SpeechT5PreTrainedModel",
+ "SpeechT5HifiGan"
+ ]
+ }
+ },
+ "info.art.splinter": {
+ "*": {
+ "repo": "tau/splinter-base",
+ "pkg": {
+ "0": {
+ "transformers": "SplinterModel"
+ }
+ },
+ "tasks": [
+ "SplinterForQuestionAnswering",
+ "SplinterForPreTraining",
+ "SplinterLayer",
+ "SplinterModel",
+ "SplinterPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.squeezebert-uncased": {
+ "*": {
+ "repo": "squeezebert/squeezebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "SqueezeBertModel"
+ }
+ },
+ "tasks": [
+ "SqueezeBertForMaskedLM",
+ "SqueezeBertForMultipleChoice",
+ "SqueezeBertForQuestionAnswering",
+ "SqueezeBertForSequenceClassification",
+ "SqueezeBertForTokenClassification",
+ "SqueezeBertModel",
+ "SqueezeBertModule",
+ "SqueezeBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.stablelm-4e1t": {
+ "*": {
+ "repo": "stabilityai/stablelm-3b-4e1t",
+ "pkg": {
+ "0": {
+ "transformers": "StableLmModel"
+ }
+ },
+ "tasks": [
+ "StableLmForCausalLM",
+ "StableLmModel",
+ "StableLmPreTrainedModel",
+ "StableLmForSequenceClassification",
+ "StableLmForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.starcoder2": {
+ "*": {
+ "repo": "bigcode/starcoder2-7b",
+ "pkg": {
+ "0": {
+ "transformers": "Starcoder2Model"
+ }
+ },
+ "tasks": [
+ "Starcoder2ForCausalLM",
+ "Starcoder2Model",
+ "Starcoder2PreTrainedModel",
+ "Starcoder2ForSequenceClassification",
+ "Starcoder2ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.swiftformer-xs": {
+ "*": {
+ "repo": "MBZUAI/swiftformer-xs",
+ "pkg": {
+ "0": {
+ "transformers": "SwiftFormerModel"
+ }
+ },
+ "tasks": [
+ "SwiftFormerForImageClassification",
+ "SwiftFormerModel",
+ "SwiftFormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.swin2sr-classicalsr-x2-64": {
+ "*": {
+ "repo": "caidas/swin2sr-classicalsr-x2-64",
+ "pkg": {
+ "0": {
+ "transformers": "Swin2SRModel"
+ }
+ },
+ "tasks": [
+ "Swin2SRForImageSuperResolution",
+ "Swin2SRModel",
+ "Swin2SRPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.swinv2-patch4-window8-256": {
+ "*": {
+ "repo": "microsoft/swinv2-tiny-patch4-window8-256",
+ "pkg": {
+ "0": {
+ "transformers": "Swinv2Model"
+ }
+ },
+ "tasks": [
+ "Swinv2ForImageClassification",
+ "Swinv2ForMaskedImageModeling",
+ "Swinv2Model",
+ "Swinv2PreTrainedModel",
+ "Swinv2Backbone"
+ ]
+ }
+ },
+ "info.moe.switch-8": {
+ "*": {
+ "repo": "google/switch-base-8",
+ "pkg": {
+ "0": {
+ "transformers": "SwitchTransformersModel"
+ }
+ },
+ "tasks": [
+ "SwitchTransformersEncoderModel",
+ "SwitchTransformersForConditionalGeneration",
+ "SwitchTransformersModel",
+ "SwitchTransformersPreTrainedModel",
+ "SwitchTransformersTop1Router",
+ "SwitchTransformersSparseMLP"
+ ]
+ }
+ },
+ "info.stst.t5": {
+ "*": {
+ "repo": "google-t5/t5-small",
+ "pkg": {
+ "0": {
+ "transformers": "T5Model"
+ }
+ },
+ "identifiers": [
+ [
+ 4096
+ ],
+ "encoder.embed_tokens.weight",
+ "text_encoders.t5xxl.transformer.shared.weight",
+ "t5xxl",
+ "encoder.block.0.layer.1.DenseReluDense.wi.weight"
+ ],
+ "file_256": [
+ "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
+ "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
+ "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
+ "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
+ "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
+ "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
+ "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
+ "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
+ "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
+ "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
+ "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
+ "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
+ "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
+ "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
+ "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
+ "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
+ "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
+ "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
+ "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
+ "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
+ "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
+ "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
+ "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
+ "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
+ "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
+ "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
+ "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
+ ],
+ "layer_b3": [
+ "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
+ "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
+ "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
+ "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
+ "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
+ "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
+ "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
+ "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
+ "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
+ "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
+ "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
+ "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
+ "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
+ "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
+ "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
+ "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
+ "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
+ "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
+ "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
+ "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
+ "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
+ "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
+ "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
+ "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
+ "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
+ "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
+ "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
+ "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
+ "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
+ "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
+ ],
+ "layer_256": [
+ "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
+ "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
+ "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
+ "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
+ "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
+ "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
+ "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
+ "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
+ "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
+ "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
+ "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
+ "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
+ "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
+ "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
+ "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
+ "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
+ "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
+ "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
+ "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
+ "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
+ "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
+ "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
+ "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
+ "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
+ "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
+ "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
+ "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
+ "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
+ ],
+ "tasks": [
+ "T5EncoderModel",
+ "T5ForConditionalGeneration",
+ "T5Model",
+ "T5PreTrainedModel",
+ "T5ForQuestionAnswering",
+ "T5ForSequenceClassification",
+ "T5ForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.t5gemma-prefixlm": {
+ "*": {
+ "repo": "google/t5gemma-2b-2b-prefixlm-it",
+ "pkg": {
+ "0": {
+ "transformers": "T5GemmaModel"
+ }
+ },
+ "tasks": [
+ "T5GemmaForConditionalGeneration",
+ "T5GemmaModel",
+ "T5GemmaEncoderModel",
+ "T5GemmaPreTrainedModel",
+ "T5GemmaForSequenceClassification",
+ "T5GemmaForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.t5gemma-2": {
+ "*": {
+ "repo": "google/t5gemma-2-270m-270m",
+ "pkg": {
+ "0": {
+ "transformers": "T5Gemma2Model"
+ }
+ },
+ "tasks": [
+ "T5Gemma2ForConditionalGeneration",
+ "T5Gemma2Model",
+ "T5Gemma2PreTrainedModel",
+ "T5Gemma2ForSequenceClassification",
+ "T5Gemma2ForTokenClassification"
+ ]
+ }
+ },
+ "info.detr.table-transformer-detection": {
+ "*": {
+ "repo": "microsoft/table-transformer-detection",
+ "pkg": {
+ "0": {
+ "transformers": "TableTransformerModel"
+ }
+ },
+ "tasks": [
+ "TableTransformerForObjectDetection",
+ "TableTransformerModel",
+ "TableTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.tapas-finetuned-sqa": {
+ "*": {
+ "repo": "google/tapas-base-finetuned-sqa",
+ "pkg": {
+ "0": {
+ "transformers": "TapasModel"
+ }
+ },
+ "tasks": [
+ "TapasForMaskedLM",
+ "TapasForQuestionAnswering",
+ "TapasForSequenceClassification",
+ "TapasModel",
+ "TapasPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.textnet": {
+ "*": {
+ "repo": "czczup/textnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "TextNetModel"
+ }
+ },
+ "tasks": [
+ "TextNetBackbone",
+ "TextNetModel",
+ "TextNetPreTrainedModel",
+ "TextNetForImageClassification"
+ ]
+ }
+ },
+ "info.stst.time-series-transformer-tourism-monthly": {
+ "*": {
+ "repo": "huggingface/time-series-transformer-tourism-monthly",
+ "pkg": {
+ "0": {
+ "transformers": "TimeSeriesTransformerModel"
+ }
+ },
+ "tasks": [
+ "TimeSeriesTransformerForPrediction",
+ "TimeSeriesTransformerModel",
+ "TimeSeriesTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.timesfm-2-pytorch": {
+ "*": {
+ "repo": "google/timesfm-2.0-500m-pytorch",
+ "pkg": {
+ "0": {
+ "transformers": "TimesFmModel"
+ }
+ },
+ "tasks": [
+ "TimesFmModelForPrediction",
+ "TimesFmPreTrainedModel",
+ "TimesFmModel"
+ ]
+ }
+ },
+ "info.vit.timesformer-finetuned-k600": {
+ "*": {
+ "repo": "facebook/timesformer-base-finetuned-k600",
+ "pkg": {
+ "0": {
+ "transformers": "TimesformerModel"
+ }
+ },
+ "tasks": [
+ "TimesformerModel",
+ "TimesformerForVideoClassification",
+ "TimesformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.resnet18-a1-in": {
+ "*": {
+ "repo": "timm/resnet18.a1_in1k",
+ "pkg": {
+ "0": {
+ "transformers": "TimmWrapperModel"
+ }
+ },
+ "tasks": [
+ "TimmWrapperPreTrainedModel",
+ "TimmWrapperModel",
+ "TimmWrapperForImageClassification"
+ ]
+ }
+ },
+ "info.detr.tvp": {
+ "*": {
+ "repo": "Intel/tvp-base",
+ "pkg": {
+ "0": {
+ "transformers": "TvpModel"
+ }
+ },
+ "tasks": [
+ "TvpModel",
+ "TvpPreTrainedModel",
+ "TvpForVideoGrounding"
+ ]
+ }
+ },
+ "info.vit.udop": {
+ "*": {
+ "repo": "microsoft/udop-large",
+ "pkg": {
+ "0": {
+ "transformers": "UdopModel"
+ }
+ },
+ "tasks": [
+ "UdopForConditionalGeneration",
+ "UdopPreTrainedModel",
+ "UdopModel",
+ "UdopEncoderModel"
+ ]
+ }
+ },
+ "info.stst.umt5": {
+ "*": {
+ "repo": "google/umt5-small",
+ "pkg": {
+ "0": {
+ "transformers": "UMT5Model"
+ }
+ },
+ "identifiers": [
+ "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
+ ],
+ "file_256": [
+ "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
+ "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
+ "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
+ "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
+ "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
+ "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
+ "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
+ "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
+ "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
+ "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
+ "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
+ "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
+ "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
+ "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
+ "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
+ "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
+ ],
+ "layer_b3": [
+ "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
+ "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
+ "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
+ "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
+ "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
+ "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
+ "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
+ "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
+ "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
+ "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
+ "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
+ "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
+ "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
+ "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
+ "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
+ "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
+ "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
+ "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
+ ],
+ "layer_256": [
+ "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
+ "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
+ "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
+ "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
+ "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
+ "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
+ "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
+ "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
+ "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
+ "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
+ "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
+ "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
+ "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
+ "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
+ "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
+ "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
+ "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
+ "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
+ ],
+ "tasks": [
+ "UMT5EncoderModel",
+ "UMT5ForConditionalGeneration",
+ "UMT5ForQuestionAnswering",
+ "UMT5ForSequenceClassification",
+ "UMT5ForTokenClassification",
+ "UMT5Model",
+ "UMT5PreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.unispeech-1500h-cv": {
+ "*": {
+ "repo": "microsoft/unispeech-large-1500h-cv",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechModel"
+ }
+ },
+ "tasks": [
+ "UniSpeechForCTC",
+ "UniSpeechForPreTraining",
+ "UniSpeechForSequenceClassification",
+ "UniSpeechModel",
+ "UniSpeechPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.unispeech-sat-100h-libri-ft": {
+ "*": {
+ "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "pkg": {
+ "0": {
+ "transformers": "UniSpeechSatModel"
+ }
+ },
+ "tasks": [
+ "UniSpeechSatForAudioFrameClassification",
+ "UniSpeechSatForCTC",
+ "UniSpeechSatForPreTraining",
+ "UniSpeechSatForSequenceClassification",
+ "UniSpeechSatForXVector",
+ "UniSpeechSatModel",
+ "UniSpeechSatPreTrainedModel"
+ ]
+ }
+ },
+ "info.gan.univnet-dev": {
+ "*": {
+ "repo": "dg845/univnet-dev",
+ "pkg": {
+ "0": {
+ "transformers": "UnivNetModel"
+ }
+ },
+ "tasks": [
+ "UnivNetModel"
+ ]
+ }
+ },
+ "info.stst.vaultgemma": {
+ "*": {
+      "repo": "google/vaultgemma-1b",
+ "pkg": {
+ "0": {
+ "transformers": "VaultGemmaModel"
+ }
+ },
+ "tasks": [
+ "VaultGemmaForCausalLM",
+ "VaultGemmaModel",
+ "VaultGemmaPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.videollama3-image-hf": {
+ "*": {
+ "repo": "lkhl/VideoLLaMA3-2B-Image-HF",
+ "pkg": {
+ "0": {
+ "transformers": "VideoLlama3Model"
+ }
+ },
+ "tasks": [
+ "VideoLlama3VisionModel",
+ "VideoLlama3PreTrainedModel",
+ "VideoLlama3Model",
+ "VideoLlama3ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.vit.video-llava-hf": {
+ "*": {
+ "repo": "LanguageBind/Video-LLaVA-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "VideoLlavaModel"
+ }
+ },
+ "tasks": [
+ "VideoLlavaPreTrainedModel",
+ "VideoLlavaModel",
+ "VideoLlavaForConditionalGeneration"
+ ]
+ }
+ },
+ "info.vit.videomae": {
+ "*": {
+ "repo": "MCG-NJU/videomae-base",
+ "pkg": {
+ "0": {
+ "transformers": "VideoMAEModel"
}
- }
- },
- "megatron-bert-uncased": {
+ },
+ "tasks": [
+ "VideoMAEForPreTraining",
+ "VideoMAEModel",
+ "VideoMAEPreTrainedModel",
+ "VideoMAEForVideoClassification"
+ ]
+ }
+ },
+ "info.vit.vilt-b32-mlm": {
+ "*": {
+ "repo": "dandelin/vilt-b32-mlm",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViltModel"
}
- }
- },
- "grounding-dino": {
+ },
+ "tasks": [
+ "ViltForImageAndTextRetrieval",
+ "ViltForImagesAndTextClassification",
+ "ViltForTokenClassification",
+ "ViltForMaskedLM",
+ "ViltForQuestionAnswering",
+ "ViltLayer",
+ "ViltModel",
+ "ViltPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.vip-llava-hf": {
+ "*": {
+ "repo": "ybelkada/vip-llava-7b-hf",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "VipLlavaModel"
}
- }
- },
- "funnel": {
+ },
+ "tasks": [
+ "VipLlavaModel",
+ "VipLlavaForConditionalGeneration",
+ "VipLlavaPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.japanese-clip-vit-h-14-bert-wider": {
+ "*": {
+ "repo": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "VisionTextDualEncoderModel"
}
- }
- },
- "wav2vec2-conformer-rel-pos": {
+ },
+ "tasks": [
+ "VisionTextDualEncoderModel"
+ ]
+ }
+ },
+ "info.art.visualbert-vqa-coco-pre": {
+ "*": {
+ "repo": "uclanlp/visualbert-vqa-coco-pre",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "15": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "16": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "17": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "18": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "19": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "VisualBertModel"
}
- }
- },
- "mm-grounding-dino-o365v1-goldg-v3det": {
+ },
+ "tasks": [
+ "VisualBertForMultipleChoice",
+ "VisualBertForPreTraining",
+ "VisualBertForQuestionAnswering",
+ "VisualBertForRegionToPhraseAlignment",
+ "VisualBertForVisualReasoning",
+ "VisualBertLayer",
+ "VisualBertModel",
+ "VisualBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.vit-patch16-224": {
+ "*": {
+ "repo": "google/vit-base-patch16-224",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViTModel"
}
- }
- },
- "gpt2": {
+ },
+ "tasks": [
+ "ViTForImageClassification",
+ "ViTForMaskedImageModeling",
+ "ViTModel",
+ "ViTPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.vit-mae": {
+ "*": {
+ "repo": "facebook/vit-mae-base",
"pkg": {
"0": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "1": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "2": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "3": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "4": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "5": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "6": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "7": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "8": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "9": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "10": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "11": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "12": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "13": {
- "transformers": "transformers.utils.import_utils.transformers"
- },
- "14": {
- "transformers": "transformers.utils.import_utils.transformers"
+ "transformers": "ViTMAEModel"
}
- }
+ },
+ "tasks": [
+ "ViTMAEForPreTraining",
+ "ViTMAELayer",
+ "ViTMAEModel",
+ "ViTMAEPreTrainedModel"
+ ]
}
},
- "info.detr.omdet-turbo-swin-hf": {
+ "info.vit.vit-msn": {
"*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "repo": "facebook/vit-msn-base",
"pkg": {
"0": {
- "transformers": "OmDetTurboForObjectDetection"
+ "transformers": "ViTMSNModel"
}
},
"tasks": [
- "OmDetTurboForObjectDetection",
- "OmDetTurboPreTrainedModel"
+ "ViTMSNModel",
+ "ViTMSNForImageClassification",
+ "ViTMSNPreTrainedModel"
]
}
},
- "info.vit.blip2-opt": {
+ "info.vit.vitdet-patch16-224": {
"*": {
- "repo": "Salesforce/blip2-opt-2.7b",
+ "repo": "google/vitdet-base-patch16-224",
"pkg": {
"0": {
- "transformers": "Blip2Model"
+ "transformers": "VitDetModel"
}
},
"tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
+ "VitDetModel",
+ "VitDetPreTrainedModel",
+ "VitDetBackbone"
]
}
},
- "info.art.deberta-v2-x": {
+ "info.art.mms-tts-eng": {
"*": {
- "repo": "microsoft/deberta-v2-xlarge",
+ "repo": "facebook/mms-tts-eng",
"pkg": {
"0": {
- "transformers": "DebertaV2Model"
+ "transformers": "VitsModel"
}
},
"tasks": [
- "DebertaV2ForMaskedLM",
- "DebertaV2ForMultipleChoice",
- "DebertaV2ForQuestionAnswering",
- "DebertaV2ForSequenceClassification",
- "DebertaV2ForTokenClassification",
- "DebertaV2Model",
- "DebertaV2PreTrainedModel"
+ "VitsModel",
+ "VitsPreTrainedModel"
]
}
},
- "info.vit.ast-finetuned-audioset-10-10-0593": {
+ "info.vit.vivit16x2-kinetics400": {
"*": {
- "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
+ "repo": "google/vivit-b-16x2-kinetics400",
"pkg": {
"0": {
- "transformers": "ASTModel"
+ "transformers": "VivitModel"
}
},
"tasks": [
- "ASTForAudioClassification",
- "ASTModel",
- "ASTPreTrainedModel"
+ "VivitModel",
+ "VivitPreTrainedModel",
+ "VivitForVideoClassification"
]
}
},
- "info.detr.dab-detr": {
+ "info.vit.vjepa2-vitl-fpc64-256": {
"*": {
- "repo": "IDEA-Research/dab-detr-resnet-50",
+ "repo": "facebook/vjepa2-vitl-fpc64-256",
"pkg": {
"0": {
- "transformers": "DabDetrModel"
+ "transformers": "VJEPA2Model"
}
},
"tasks": [
- "DabDetrForObjectDetection",
- "DabDetrModel",
- "DabDetrPreTrainedModel"
+ "VJEPA2Model",
+ "VJEPA2PreTrainedModel",
+ "VJEPA2ForVideoClassification"
]
}
},
- "info.aet.sew-d": {
+ "info.stst.voxtral-2507": {
"*": {
- "repo": "asapp/sew-d-tiny-100k",
+ "repo": "mistralai/Voxtral-Mini-3B-2507",
"pkg": {
"0": {
- "transformers": "SEWDModel"
+ "transformers": "VoxtralForConditionalGeneration"
}
},
"tasks": [
- "SEWDForCTC",
- "SEWDForSequenceClassification",
- "SEWDModel",
- "SEWDPreTrainedModel"
+ "VoxtralPreTrainedModel",
+ "VoxtralEncoder",
+ "VoxtralForConditionalGeneration"
]
}
},
- "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
+ "info.aet.voxtral-2507": {
"*": {
- "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
+ "repo": "mistralai/Voxtral-Mini-3B-2507",
"pkg": {
"0": {
- "transformers": "BertGenerationEncoder"
+ "transformers": "VoxtralEncoder"
}
},
"tasks": [
- "BertGenerationDecoder",
- "BertGenerationEncoder",
- "BertGenerationPreTrainedModel"
+ "VoxtralPreTrainedModel",
+ "VoxtralEncoder",
+ "VoxtralForConditionalGeneration"
+ ]
+ }
+ },
+ "info.aet.wav2vec2-960h": {
+ "*": {
+ "repo": "facebook/wav2vec2-base-960h",
+ "pkg": {
+ "0": {
+ "transformers": "Wav2Vec2Model"
+ }
+ },
+ "tasks": [
+ "Wav2Vec2ForAudioFrameClassification",
+ "Wav2Vec2ForCTC",
+ "Wav2Vec2ForMaskedLM",
+ "Wav2Vec2ForPreTraining",
+ "Wav2Vec2ForSequenceClassification",
+ "Wav2Vec2ForXVector",
+ "Wav2Vec2Model",
+ "Wav2Vec2PreTrainedModel"
]
}
},
@@ -5193,91 +12126,142 @@
]
}
},
- "info.stst.nllb-moe": {
+ "info.aet.wav2vec2-conformer-rel-pos": {
"*": {
- "repo": "facebook/nllb-moe-54b",
+ "repo": "facebook/wav2vec2-conformer-rel-pos-large",
"pkg": {
"0": {
- "transformers": "NllbMoeModel"
+ "transformers": "Wav2Vec2ConformerModel"
}
},
"tasks": [
- "NllbMoeForConditionalGeneration",
- "NllbMoeModel",
- "NllbMoePreTrainedModel",
- "NllbMoeTop2Router",
- "NllbMoeSparseMLP"
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.wavlm": {
+ "*": {
+ "repo": "microsoft/wavlm-base",
+ "pkg": {
+ "0": {
+ "transformers": "WavLMModel"
+ }
+ },
+ "tasks": [
+ "WavLMForAudioFrameClassification",
+ "WavLMForCTC",
+ "WavLMForSequenceClassification",
+ "WavLMForXVector",
+ "WavLMModel",
+ "WavLMPreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.whisper": {
+ "*": {
+ "repo": "openai/whisper-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "WhisperModel"
+ }
+ },
+ "tasks": [
+ "WhisperForCausalLM",
+ "WhisperForConditionalGeneration",
+ "WhisperModel",
+ "WhisperPreTrainedModel",
+ "WhisperForAudioClassification"
+ ]
+ }
+ },
+ "info.vit.xclip-patch32": {
+ "*": {
+ "repo": "microsoft/xclip-base-patch32",
+ "pkg": {
+ "0": {
+ "transformers": "XCLIPModel"
+ }
+ },
+ "tasks": [
+ "XCLIPModel",
+ "XCLIPPreTrainedModel",
+ "XCLIPTextModel",
+ "XCLIPVisionModel"
]
}
},
- "info.art.efficient-mlm-m0-0": {
+ "info.gan.x-codec": {
"*": {
- "repo": "andreasmadsen/efficient_mlm_m0.40",
+ "repo": "Manel/X-Codec",
"pkg": {
"0": {
- "transformers": "RobertaPreLayerNormModel"
+ "transformers": "XcodecModel"
}
},
"tasks": [
- "RobertaPreLayerNormForCausalLM",
- "RobertaPreLayerNormForMaskedLM",
- "RobertaPreLayerNormForMultipleChoice",
- "RobertaPreLayerNormForQuestionAnswering",
- "RobertaPreLayerNormForSequenceClassification",
- "RobertaPreLayerNormForTokenClassification",
- "RobertaPreLayerNormModel",
- "RobertaPreLayerNormPreTrainedModel"
+ "XcodecModel",
+ "XcodecPreTrainedModel"
]
}
},
- "info.art.xlm-roberta": {
+ "info.art.xglm": {
"*": {
- "repo": "FacebookAI/xlm-roberta-base",
+ "repo": "facebook/xglm-564M",
"pkg": {
"0": {
- "transformers": "XLMRobertaModel"
+ "transformers": "XGLMModel"
}
},
"tasks": [
- "XLMRobertaForCausalLM",
- "XLMRobertaForMaskedLM",
- "XLMRobertaForMultipleChoice",
- "XLMRobertaForQuestionAnswering",
- "XLMRobertaForSequenceClassification",
- "XLMRobertaForTokenClassification",
- "XLMRobertaModel",
- "XLMRobertaPreTrainedModel"
+ "XGLMForCausalLM",
+ "XGLMModel",
+ "XGLMPreTrainedModel"
]
}
},
- "info.vit.mgp-str": {
+ "info.art.xlm-mlm-en-2048": {
"*": {
- "repo": "alibaba-damo/mgp-str-base",
+ "repo": "FacebookAI/xlm-mlm-en-2048",
"pkg": {
"0": {
- "transformers": "MgpstrForSceneTextRecognition"
+ "transformers": "XLMModel"
}
},
"tasks": [
- "MgpstrModel",
- "MgpstrPreTrainedModel",
- "MgpstrForSceneTextRecognition"
+ "XLMForMultipleChoice",
+ "XLMForQuestionAnswering",
+ "XLMForQuestionAnsweringSimple",
+ "XLMForSequenceClassification",
+ "XLMForTokenClassification",
+ "XLMModel",
+ "XLMPreTrainedModel",
+ "XLMWithLMHeadModel"
]
}
},
- "info.stst.blenderbot": {
+ "info.art.xlm-roberta": {
"*": {
- "repo": "facebook/blenderbot_small-90M",
+ "repo": "FacebookAI/xlm-roberta-base",
"pkg": {
"0": {
- "transformers": "BlenderbotSmallModel"
+ "transformers": "XLMRobertaModel"
}
},
"tasks": [
- "BlenderbotSmallForCausalLM",
- "BlenderbotSmallForConditionalGeneration",
- "BlenderbotSmallModel",
- "BlenderbotSmallPreTrainedModel"
+ "XLMRobertaForCausalLM",
+ "XLMRobertaForMaskedLM",
+ "XLMRobertaForMultipleChoice",
+ "XLMRobertaForQuestionAnswering",
+ "XLMRobertaForSequenceClassification",
+ "XLMRobertaForTokenClassification",
+ "XLMRobertaModel",
+ "XLMRobertaPreTrainedModel"
]
}
},
@@ -5301,148 +12285,125 @@
]
}
},
- "info.art.megatron-bert-uncased": {
- "*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
- "pkg": {
- "0": {
- "transformers": "MegatronBertModel"
- }
- },
- "tasks": [
- "MegatronBertForCausalLM",
- "MegatronBertForMaskedLM",
- "MegatronBertForMultipleChoice",
- "MegatronBertForNextSentencePrediction",
- "MegatronBertForPreTraining",
- "MegatronBertForQuestionAnswering",
- "MegatronBertForSequenceClassification",
- "MegatronBertForTokenClassification",
- "MegatronBertModel",
- "MegatronBertPreTrainedModel"
- ]
- }
- },
- "info.detr.grounding-dino": {
+ "info.art.xlnet-cased": {
"*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
+ "repo": "xlnet/xlnet-large-cased",
"pkg": {
"0": {
- "transformers": "GroundingDinoModel"
+ "transformers": "XLNetModel"
}
},
"tasks": [
- "GroundingDinoForObjectDetection",
- "GroundingDinoModel",
- "GroundingDinoPreTrainedModel"
+ "XLNetForMultipleChoice",
+ "XLNetForQuestionAnswering",
+ "XLNetForQuestionAnsweringSimple",
+ "XLNetForSequenceClassification",
+ "XLNetForTokenClassification",
+ "XLNetLMHeadModel",
+ "XLNetModel",
+ "XLNetPreTrainedModel"
]
}
},
- "info.detr.table-transformer-detection": {
+ "info.lstm.xlstm": {
"*": {
- "repo": "microsoft/table-transformer-detection",
+ "repo": "NX-AI/xLSTM-7b",
"pkg": {
"0": {
- "transformers": "TableTransformerModel"
+ "transformers": "xLSTMModel"
}
},
"tasks": [
- "TableTransformerForObjectDetection",
- "TableTransformerModel",
- "TableTransformerPreTrainedModel"
+ "xLSTMForCausalLM",
+ "xLSTMModel",
+ "xLSTMPreTrainedModel"
]
}
},
- "info.aet.funnel": {
+ "info.art.xmod": {
"*": {
- "repo": "funnel-transformer/small",
+ "repo": "facebook/xmod-base",
"pkg": {
"0": {
- "transformers": "FunnelModel"
+ "transformers": "XmodModel"
}
},
"tasks": [
- "FunnelBaseModel",
- "FunnelForMaskedLM",
- "FunnelForMultipleChoice",
- "FunnelForPreTraining",
- "FunnelForQuestionAnswering",
- "FunnelForSequenceClassification",
- "FunnelForTokenClassification",
- "FunnelModel",
- "FunnelPreTrainedModel"
+ "XmodForCausalLM",
+ "XmodForMaskedLM",
+ "XmodForMultipleChoice",
+ "XmodForQuestionAnswering",
+ "XmodForSequenceClassification",
+ "XmodForTokenClassification",
+ "XmodModel",
+ "XmodPreTrainedModel"
]
}
},
- "info.aet.unispeech-sat-100h-libri-ft": {
+ "info.cnn.yolos": {
"*": {
- "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
+ "repo": "hustvl/yolos-base",
"pkg": {
"0": {
- "transformers": "UniSpeechSatModel"
+ "transformers": "YolosModel"
}
},
"tasks": [
- "UniSpeechSatForAudioFrameClassification",
- "UniSpeechSatForCTC",
- "UniSpeechSatForPreTraining",
- "UniSpeechSatForSequenceClassification",
- "UniSpeechSatForXVector",
- "UniSpeechSatModel",
- "UniSpeechSatPreTrainedModel"
+ "YolosForObjectDetection",
+ "YolosModel",
+ "YolosPreTrainedModel"
]
}
},
- "info.aet.wav2vec2-conformer-rel-pos": {
+ "info.art.yoso-4096": {
"*": {
- "repo": "facebook/wav2vec2-conformer-rel-pos-large",
+ "repo": "uw-madison/yoso-4096",
"pkg": {
"0": {
- "transformers": "Wav2Vec2ConformerModel"
+ "transformers": "YosoModel"
}
},
"tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
+ "YosoForMaskedLM",
+ "YosoForMultipleChoice",
+ "YosoForQuestionAnswering",
+ "YosoForSequenceClassification",
+ "YosoForTokenClassification",
+ "YosoLayer",
+ "YosoModel",
+ "YosoPreTrainedModel"
]
}
},
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "info.ssm.zamba-v1": {
"*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "repo": "Zyphra/Zamba-7B-v1",
"pkg": {
"0": {
- "transformers": "MMGroundingDinoModel"
+ "transformers": "ZambaModel"
}
},
"tasks": [
- "MMGroundingDinoForObjectDetection",
- "MMGroundingDinoModel",
- "MMGroundingDinoPreTrainedModel"
+ "ZambaForCausalLM",
+ "ZambaForSequenceClassification",
+ "ZambaModel",
+ "ZambaPreTrainedModel"
]
}
},
- "info.art.gpt2": {
+ "info.ssm.zamba2": {
"*": {
- "repo": "openai-community/gpt2",
+ "repo": "Zyphra/Zamba2-2.7B",
"pkg": {
"0": {
- "transformers": "GPT2Model"
+ "transformers": "Zamba2Model"
}
},
"tasks": [
- "GPT2DoubleHeadsModel",
- "GPT2ForQuestionAnswering",
- "GPT2ForSequenceClassification",
- "GPT2ForTokenClassification",
- "GPT2LMHeadModel",
- "GPT2Model",
- "GPT2PreTrainedModel"
+ "Zamba2ForCausalLM",
+ "Zamba2ForSequenceClassification",
+ "Zamba2Model",
+ "Zamba2PreTrainedModel"
]
}
},
@@ -7154,6 +14115,167 @@
]
}
},
+ "info.dit.flux1-dev": {
+ "mystic": {
+ "repo": "enhanceaiteam/Mystic",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 16,
+ "guidance_scale": 7.5,
+ "width": 768,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
+ "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
+ ],
+ "layer_256": [
+ "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
+ ],
+ "layer_b3": [
+ "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
+ ]
+ },
+ "flux1-lite": {
+ "repo": "freepik/flux.1-lite-8b",
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ },
+ "file_256": [
+ "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
+ "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
+ ],
+ "layer_256": [
+ "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
+ ],
+ "layer_b3": [
+ "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
+ ]
+ },
+ "f-lite": {
+ "repo": "freepik/f-lite",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "f-lite-texture": {
+ "repo": "freepik/f-lite-texture",
+ "pkg": {
+ "0": {
+ "f_lite": "FLitePipeline",
+ "generation": {
+ "num_inference_steps": 28
+ }
+ }
+ }
+ },
+ "flux": {
+ "repo": "TencentARC/flux-mini",
+ "file_256": [
+ "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
+ ],
+ "layer_256": [
+ "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
+ ],
+ "layer_b3": [
+ "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
+ ]
+ },
+ "flex2": {
+ "repo": "ostris/Flex.2-preview",
+ "file_256": [
+ "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
+ "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
+ ],
+ "layer_256": [
+ "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
+ ],
+ "layer_b3": [
+ "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
+ ]
+ },
+ "flex1-alpha": {
+ "repo": "ostris/Flex.1-alpha",
+ "file_256": [
+ "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
+ "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
+ ],
+ "layer_256": [
+ "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
+ ],
+ "layer_b3": [
+ "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
+ ]
+ },
+ "*": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 3.5,
+ "num_inference_steps": 50,
+ "max_sequence_length": 512
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+            "guidance": 3.5,
+ "num_inference_steps": 25
+ }
+ }
+ },
+ "file_256": [
+ "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
+ "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
+ "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
+ "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
+ "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
+ ],
+ "layer_b3": [
+ "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
+ "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
+ "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
+ ],
+ "layer_256": [
+ "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
+ "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
+ "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
+ ],
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ],
+ "tasks": [
+ "Image",
+ "Redux",
+ "Kontext",
+ "Depth",
+ "Fill",
+ "ConceptAttention",
+ "ControlNet",
+        "CatVTON",
+ "IC-Edit"
+ ]
+ }
+ },
"info.dit.wan2-flf2v-720p": {
"diffusers": {
"repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
diff --git a/mir/config/constants.py b/mir/config/constants.py
index 23632fd..a572017 100644
--- a/mir/config/constants.py
+++ b/mir/config/constants.py
@@ -41,7 +41,7 @@ def mapped_cls(model_identifier: str):
return None
-def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable:
+def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable | None:
"""Convert two strings into a callable function or property\n
:param module: The name of the module to import
:param library_path: Base package for the module
@@ -62,7 +62,7 @@ def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable:
nfo("failed to find module {module}")
-def extract_init_params(module: Callable | str, package_name: str | None = None) -> dict[str, list[str]]:
+def extract_init_parameters(module: Callable | str, package_name: str | None = None) -> dict[str, list[str]]:
"""Pick apart a Diffusers or Transformers pipeline class and find its constituent parts (formerly root_class)\n
:param module: Origin pipeline as a class or as a string
:param library: name of a library to import the class from, only if a string is provided
@@ -77,23 +77,12 @@ def extract_init_params(module: Callable | str, package_name: str | None = None)
module_obj = module
signature = inspect.signature(module_obj.__init__)
class_names = {}
- for folder, param in signature.parameters.items():
- if folder not in ["self", "kwargs", "use_cache"]:
- sub_module = str(param.annotation).split("'")
- if len(sub_module) > 1 and sub_module[1] not in [
- "bool",
- "int",
- "float",
- "complex",
- "str",
- "list",
- "tuple",
- "dict",
- "set",
- "inspect",
- "_empty",
- ]:
- class_names.setdefault(folder, sub_module[1].split("."))
+ editable_signature = signature.parameters.copy()
+ editable_signature.pop("self", None)
+ editable_signature.pop("kwargs", None)
+ editable_signature.pop("use_cache", None)
+ for folder, param in editable_signature.items():
+ class_names.setdefault(folder, True)
return class_names
@@ -110,9 +99,9 @@ class ClassMapEntry:
def __post_init__(self):
if self.model:
- self.model_params = extract_init_params(self.model)
+ self.model_params = extract_init_parameters(self.model)
if self.config:
- self.config_params = extract_init_params(self.config)
+ self.config_params = extract_init_parameters(self.config)
@dataclass
diff --git a/mir/indexers.py b/mir/indexers.py
index 0c155ce..573d877 100644
--- a/mir/indexers.py
+++ b/mir/indexers.py
@@ -8,7 +8,7 @@
from typing import Any, Callable
from mir.config.console import nfo
-from mir.config.constants import ClassMapEntry, extract_init_params
+from mir.config.constants import ClassMapEntry, extract_init_parameters
from mir.config.conversion import get_repo_from_class_map, import_submodules
from mir.doc_parser import parse_docs, DocParseData
from mir.tag import mir_prefix_from_forward_pass, mir_tag_from_config, tag_model_from_repo
@@ -47,7 +47,7 @@ def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable
mir_prefix = "info"
if hasattr(diffusers, class_name):
model_class_obj = getattr(diffusers, class_name)
- sub_segments = extract_init_params(model_class_obj, "diffusers")
+ sub_segments = extract_init_parameters(model_class_obj, "diffusers")
decoder = "decoder" in sub_segments
if repo_path in ["kandinsky-community/kandinsky-3"]:
mir_prefix = "info.unet"
@@ -116,7 +116,7 @@ def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
model_class_obj = import_submodules(parsed_data.pipe_class, f"diffusers.pipelines.{extracted.package_name}.{extracted.file_name}")
if not model_class_obj:
continue
- extract_init_params(model_class_obj)
+ extract_init_parameters(model_class_obj)
try:
series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
except TypeError:
@@ -167,13 +167,14 @@ def transformers_index():
repo_path = get_repo_from_class_map(entry)
if config := missing_config_params.get(entry.name, {}):
entry.config_params = config.get("params", entry.config_params)
- if not repo_path:
+ if not repo_path or entry.name == "gpt_oss":
repo_path = config["repo_path"]
if not repo_path:
raise ValueError(f"Unable to determine repo from {entry}")
- if entry.config_params and list(entry.config_params) != ["use_cache", "kwargs"]:
+ if entry.config_params:
mir_series, mir_comp, mir_suffix = mir_tag_from_config(entry, repo_path)
# modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
+
repo_path = check_migrations(repo_path)
tk_pkg = {}
tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
diff --git a/mir/inspect/metadata.py b/mir/inspect/metadata.py
index 190d61b..613afae 100644
--- a/mir/inspect/metadata.py
+++ b/mir/inspect/metadata.py
@@ -4,7 +4,7 @@
from typing import Callable, Generator
import diffusers
-from mir.config.constants import ClassMapEntry, DocStringEntry, extract_init_params
+from mir.config.constants import ClassMapEntry, DocStringEntry, extract_init_parameters
from mir.config.conversion import retrieve_diffusers_docstrings
diff --git a/mir/inspect/pipes.py b/mir/inspect/pipes.py
index 8ef1d06..cdec5f7 100644
--- a/mir/inspect/pipes.py
+++ b/mir/inspect/pipes.py
@@ -9,13 +9,13 @@ def show_shared_hyperparameters(parameter_filter: Optional[str] = None) -> List[
:param from_match: Narrow the classes to only those with an exact key inside
:return: A list of all Classes"""
from mir.inspect.metadata import map_transformers_classes
- from mir.config.constants import extract_init_params
+ from mir.config.constants import extract_init_parameters
transformers_data = map_transformers_classes()
config_data = []
for entry in transformers_data:
if parameter_filter:
- segments = extract_init_params(module=entry.config, package_name="transformers")
+ segments = extract_init_parameters(module=entry.config, package_name="transformers")
if parameter_filter in list(segments):
config_data.append(entry.config)
else:
diff --git a/mir/mir.json b/mir/mir.json
index 4c9d44d..c897555 100644
--- a/mir/mir.json
+++ b/mir/mir.json
@@ -116,6 +116,29 @@
"0": {
"diffusers": "MarigoldDepthPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-depth-v1-1"
+ ]
}
}
},
@@ -126,6 +149,29 @@
"0": {
"diffusers": "MarigoldIntrinsicsPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-iid-appearance-v1-1"
+ ]
}
}
},
@@ -136,6 +182,29 @@
"0": {
"diffusers": "MarigoldNormalsPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.lcm",
+ "scheduler"
+ ]
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "marigold-normals-v1-1"
+ ]
}
}
},
@@ -146,6 +215,94 @@
"0": {
"diffusers": "StableDiffusionPipeline"
}
+ },
+ "identifiers": [
+ "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
+ ],
+ "file_256": [
+ "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
+ "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
+ "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
+ "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
+ "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
+ "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
+ "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
+ ],
+ "layer_b3": [
+ "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
+ "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
+ "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
+ "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
+ ],
+ "layer_256": [
+ "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
+ "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
+ "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
+ ],
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-v1-5"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "safety_checker": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ]
}
}
},
@@ -156,6 +313,80 @@
"0": {
"diffusers": "StableUnCLIPPipeline"
}
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "prior_tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-unclip-2-1-l"
+ ],
+ "prior_text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "prior": [
+ "PriorTransformer"
+ ],
+ "prior_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_normalizer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-unclip-2-1-l"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ]
}
}
},
@@ -166,6 +397,73 @@
"0": {
"diffusers": "StableUnCLIPImg2ImgPipeline"
}
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "image_normalizer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-2-1-unclip"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ]
}
}
},
@@ -174,9 +472,42 @@
"repo": "stabilityai/stable-diffusion-xl-base-1.0",
"pkg": {
"0": {
- "diffusers": "StableDiffusionXLPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "denoising_end": 0.8,
+ "num_inference_steps": 40,
+ "output_type": "latent",
+ "safety_checker": false,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
}
- }
+ },
+ "file_256": [
+ "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
+ "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
+ "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
+ "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
+ ],
+ "layer_256": [
+ "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
+ "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
+ "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
+ ],
+ "layer_b3": [
+ "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
+ "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
+ "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
+ ],
+ "identifiers": [
+ "logit_scale",
+ "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
+ "add_embedding.linear_2.bias"
+ ],
+ "pipe_names": {}
},
"pony-diffusion": {
"file_256": [
@@ -282,7 +613,8 @@
"703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
"72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
"acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
- ]
+ ],
+ "pipe_names": {}
},
"segmind-vega": {
"repo": "segmind/Segmind-Vega",
@@ -333,7 +665,81 @@
"pkg": {
"0": {
"diffusers": "StableDiffusionXLImg2ImgPipeline"
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "num_inference_steps": 40,
+ "denoising_end": 0.8
+ }
}
+ },
+ "identifiers": [
+ "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
+ ],
+ "file_256": [
+ "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
+ "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
+ "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
+ ],
+ "layer_b3": [
+ "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
+ "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
+ "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
+ ],
+ "layer_256": [
+ "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
+ "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
+ "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
+ ],
+ "tasks": [
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-refiner-1"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "stable-diffusion-xl-refiner-1"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -344,6 +750,47 @@
"0": {
"diffusers": "StableDiffusionXLInstructPix2PixPipeline"
}
+ },
+ "tasks": [
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sdxl-pix2pix-768"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "sdxl-pix2pix-768"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
}
}
},
@@ -352,9 +799,23 @@
"repo": "rhymes-ai/Allegro",
"pkg": {
"0": {
- "diffusers": "AllegroPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "guidance_scale": 7.5,
+ "max_sequence_length": 512,
+ "num_inference_steps": 100
+ }
}
- }
+ },
+ "file_256": [
+ "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
+ ],
+ "layer_b3": [
+ "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
+ ],
+ "layer_256": [
+ "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
+ ]
}
},
"info.dit.amused-512": {
@@ -364,6 +825,26 @@
"0": {
"diffusers": "AmusedInpaintPipeline"
}
+ },
+ "pipe_names": {
+ "vqvae": [
+ "VQModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "amused-512"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "transformer": [
+ "UVit2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.amused",
+ "scheduler"
+ ]
}
}
},
@@ -424,6 +905,40 @@
"0": {
"diffusers": "BriaPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "BriaTransformer2DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "bria-3"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -434,6 +949,27 @@
"0": {
"diffusers": "Flux2Pipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "info.vit.mistral-3-2503",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux2-dev"
+ ],
+ "transformer": [
+ "Flux2Transformer2DModel"
+ ]
}
}
},
@@ -442,9 +978,54 @@
"repo": "black-forest-labs/FLUX.1-schnell",
"pkg": {
"0": {
- "diffusers": "FluxInpaintPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 0.0,
+ "num_inference_steps": 4,
+ "max_sequence_length": 256
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "num_inference_steps": 4
+ }
}
- }
+ },
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ],
+ "file_256": [
+ "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
+ "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
+ ],
+ "layer_b3": [
+ "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
+ "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
+ "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
+ ],
+ "layer_256": [
+ "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
+ "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
+ "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
+ ],
+ "tasks": [
+ "Image",
+ "Redux",
+ "Kontext",
+ "Depth",
+ "Fill",
+ "ConceptAttention",
+ "ControlNet",
+        "CatVTON",
+ "IC-Edit"
+ ]
},
"shuttle-3-aesthetic": {
"repo": "shuttleai/shuttle-3.1-aesthetic",
@@ -566,6 +1147,46 @@
"0": {
"diffusers": "FluxFillPipeline"
}
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-fill-dev"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-fill-dev"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ]
}
}
},
@@ -576,6 +1197,54 @@
"0": {
"diffusers": "FluxKontextInpaintPipeline"
}
+ },
+ "tasks": [
+ "FluxControlImg2ImgPipeline",
+ "FluxControlInpaintPipeline",
+ "FluxControlNetImg2ImgPipeline",
+ "FluxControlNetInpaintPipeline",
+ "FluxControlNetPipeline",
+ "FluxControlPipeline",
+ "FluxImg2ImgPipeline",
+ "FluxInpaintPipeline",
+ "FluxKontextPipeline",
+ "FluxPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "flux1-kontext-dev"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "flux1-kontext-dev"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -586,6 +1255,31 @@
"0": {
"diffusers": "PRXPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "PRXTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder": [
+ "info.stst.t5gemma-prefixlm",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "prx-512-t2i-sft"
+ ],
+ "vae": [
+ "AutoencoderKL",
+ [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ "NoneType"
+ ]
}
}
},
@@ -596,6 +1290,36 @@
"0": {
"diffusers": "AudioLDMPipeline"
}
+ },
+ "file_256": [
+ "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clap-htsat-fused",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "vocoder": [
+ "info.stst.speecht5-asr",
+ "*"
+ ]
}
}
},
@@ -604,9 +1328,22 @@
"repo": "cvssp/audioldm2",
"pkg": {
"0": {
- "diffusers": "AudioLDM2Pipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
}
- }
+ },
+ "file_256": [
+ "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
+ ],
+ "layer_b3": [
+ "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
+ ],
+ "layer_256": [
+ "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
+ ]
}
},
"info.unet.blipdiffusion": {
@@ -616,6 +1353,31 @@
"0": {
"diffusers": "BlipDiffusionPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "blipdiffusion"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "scheduler": [
+ "ops.scheduler.pndm",
+ "scheduler"
+ ],
+ "qformer": [
+ "info.vit.blip2-opt",
+ "*"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -625,7 +1387,60 @@
"pkg": {
"0": {
"diffusers": "ChromaPipeline"
+ },
+ "1": {
+ "generation": {
+ "neg_text": "",
+          "num_steps": 28,
+ "latent_size": [
+ 64,
+ 64
+ ]
+ }
}
+ },
+ "file_256": [
+ "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
+ "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
+ "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
+ "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
+ ],
+ "layer_b3": [
+ "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
+ ],
+ "layer_256": [
+ "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
+ ],
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
},
"chroma1-hd": {
@@ -659,7 +1474,38 @@
"2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
"c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
"c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
- ]
+ ],
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
+ }
}
},
"info.dit.chroma1-hd": {
@@ -669,6 +1515,37 @@
"0": {
"diffusers": "ChromaImg2ImgPipeline"
}
+ },
+ "tasks": [
+ "ChromaPipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chroma1-hd"
+ ],
+ "transformer": [
+ "ChromaTransformer2DModel"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -677,9 +1554,24 @@
"repo": "zai-org/CogVideoX-2b",
"pkg": {
"0": {
- "diffusers": "CogVideoXPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_videos_per_prompt": 1,
+ "num_inference_steps": 50,
+ "num_frames": 49,
+ "guidance_scale": 6
+ }
}
- }
+ },
+ "file_256": [
+ "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
+ ],
+ "layer_b3": [
+ "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
+ ],
+ "layer_256": [
+ "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
+ ]
}
},
"info.controlnet.cogvideox-fun-v-pose": {
@@ -699,6 +1591,33 @@
"0": {
"diffusers": "CogVideoXImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogvideox-i2v"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.cogvideox",
+ "cogvideox-i2v"
+ ],
+ "transformer": [
+ "CogVideoXTransformer3DModel"
+ ],
+ "scheduler": [
+ [
+ "ops.scheduler.cogvideoxddim",
+ "scheduler"
+ ],
+ [
+ "ops.scheduler.cogvideoxdpm",
+ "scheduler"
+ ]
+ ]
}
}
},
@@ -707,7 +1626,14 @@
"repo": "zai-org/CogView3-Plus-3B",
"pkg": {
"0": {
- "diffusers": "CogView3PlusPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "guidance_scale": 7.0,
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
}
}
}
@@ -719,6 +1645,30 @@
"0": {
"diffusers": "CogView4Pipeline"
}
+ },
+ "tasks": [
+ "CogView4ControlPipeline",
+ "CogView4Pipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cogview4"
+ ],
+ "text_encoder": [
+ "info.stst.glm-4-chat",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "CogView4Transformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -739,6 +1689,30 @@
"0": {
"diffusers": "Cosmos2_5_PredictBasePipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+          "pre-trained"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
}
}
},
@@ -749,6 +1723,75 @@
"0": {
"diffusers": "Cosmos2TextToImagePipeline"
}
+ },
+ "file_256": [
+ "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
+ "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
+ "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
+ "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
+ "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
+ "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
+ "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
+ "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
+ "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
+ "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
+ "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
+ "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
+ "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
+ ],
+ "layer_b3": [
+ "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
+ "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
+ "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
+ "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
+ "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
+ "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
+ "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
+ "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
+ "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
+ "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
+ "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
+ "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
+ "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
+ ],
+ "layer_256": [
+ "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
+ "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
+ "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
+ "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
+ "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
+ "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
+ "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
+ "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
+ "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
+ "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
+ "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
+ "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
+ "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
+ ],
+ "pipe_names": {
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-predict2-text2image"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
}
}
},
@@ -759,6 +1802,30 @@
"0": {
"diffusers": "Cosmos2VideoToWorldPipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-predict2-video2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
}
}
},
@@ -769,6 +1836,30 @@
"0": {
"diffusers": "CosmosTextToWorldPipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-1-diffusion-text2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "scheduler": [
+ "ops.scheduler.edmeuler",
+ "scheduler"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
}
}
},
@@ -779,6 +1870,30 @@
"0": {
"diffusers": "CosmosVideoToWorldPipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "transformer": [
+ "CosmosTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "cosmos-1-diffusion-video2world"
+ ],
+ "scheduler": [
+ "ops.scheduler.edmeuler",
+ "scheduler"
+ ],
+ "safety_checker": [
+ "CosmosSafetyChecker"
+ ]
}
}
},
@@ -789,6 +1904,24 @@
"0": {
"diffusers": "IFSuperResolutionPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "if-ii-l-v1"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpm",
+ "scheduler"
+ ],
+ "image_noising_scheduler": [
+ "ops.scheduler.ddpm",
+ "scheduler"
+ ]
}
}
},
@@ -799,6 +1932,30 @@
"0": {
"diffusers": "EasyAnimatePipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "easyanimatev5-zh"
+ ],
+ "text_encoder": [
+ "Qwen2VLForConditionalGeneration",
+ [
+ "info.art.bert-uncased",
+ "*"
+ ]
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "easyanimatev5-zh"
+ ],
+ "transformer": [
+ "EasyAnimateTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -819,6 +1976,30 @@
"0": {
"diffusers": "EasyAnimateInpaintPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "easyanimatev5-zh"
+ ],
+ "text_encoder": [
+ "Qwen2VLForConditionalGeneration",
+ [
+ "info.art.bert-uncased",
+ "*"
+ ]
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "easyanimatev5-zh-inp"
+ ],
+ "transformer": [
+ "EasyAnimateTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -829,6 +2010,60 @@
"0": {
"diffusers": "HiDreamImagePipeline"
}
+ },
+ "file_256": [
+ "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
+ ],
+ "layer_b3": [
+ "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
+ ],
+ "layer_256": [
+ "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_3": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_3": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "text_encoder_4": [
+ "info.stst.llama-2-hf",
+ "*"
+ ],
+ "tokenizer_4": [
+ "info.encoder.tokenizer",
+ "hidream-i1"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -837,9 +2072,29 @@
"repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
"pkg": {
"0": {
- "diffusers": "HunyuanDiTPipeline"
+ "precision": "ops.precision.float.F16"
}
- }
+ },
+ "identifiers": [
+ "extra_embedder",
+ "model.blocks",
+ "skip_norm.weight"
+ ],
+ "file_256": [
+ "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
+ "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
+ "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
+ ],
+ "layer_b3": [
+ "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
+ "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
+ "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
+ ],
+ "layer_256": [
+ "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
+ "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
+ "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
+ ]
}
},
"info.dit.hunyuanvideo": {
@@ -849,6 +2104,44 @@
"0": {
"diffusers": "HunyuanVideoPipeline"
}
+ },
+ "file_256": [
+ "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
+ ],
+ "layer_b3": [
+ "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
+ ],
+ "layer_256": [
+ "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
+ ],
+ "pipe_names": {
+ "text_encoder": [
+ "info.stst.llama-2-hf",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo"
+ ],
+ "transformer": [
+ "HunyuanVideoTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo"
+ ]
}
}
},
@@ -859,6 +2152,39 @@
"0": {
"diffusers": "HunyuanVideoImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.vit.llava",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-i2v"
+ ],
+ "transformer": [
+ "HunyuanVideoTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-i2v"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -869,6 +2195,38 @@
"0": {
"diffusers": "HunyuanVideo15Pipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-t2v"
+ ],
+ "transformer": [
+ "HunyuanVideo15Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-t2v"
+ ],
+ "guider": [
+ "ClassifierFreeGuidance"
+ ]
}
}
},
@@ -879,6 +2237,45 @@
"0": {
"diffusers": "HunyuanVideo15ImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-i2v"
+ ],
+ "transformer": [
+ "HunyuanVideo15Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanvideo-1-480p-i2v"
+ ],
+ "guider": [
+ "ClassifierFreeGuidance"
+ ],
+ "image_encoder": [
+ "SiglipVisionModel"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -889,6 +2286,36 @@
"0": {
"diffusers": "HunyuanImagePipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -899,6 +2326,28 @@
"0": {
"diffusers": "HunyuanImageRefinerPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "hunyuanimage-2-refiner"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -909,6 +2358,47 @@
"0": {
"diffusers": "KandinskyPriorPipeline"
}
+ },
+ "tasks": [
+ "Kandinsky3Img2ImgPipeline",
+ "Kandinsky3Pipeline",
+ "KandinskyCombinedPipeline",
+ "KandinskyImg2ImgCombinedPipeline",
+ "KandinskyImg2ImgPipeline",
+ "KandinskyInpaintCombinedPipeline",
+ "KandinskyInpaintPipeline",
+ "KandinskyPipeline",
+ "KandinskyV22CombinedPipeline",
+ "KandinskyV22Img2ImgCombinedPipeline",
+ "KandinskyV22Img2ImgPipeline",
+ "KandinskyV22InpaintCombinedPipeline",
+ "KandinskyV22InpaintPipeline",
+ "KandinskyV22Pipeline"
+ ],
+ "pipe_names": {
+ "prior": [
+ "PriorTransformer"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-2-1"
+ ],
+ "scheduler": [
+ "ops.scheduler.unclip",
+ "scheduler"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -919,6 +2409,31 @@
"0": {
"diffusers": "KandinskyV22PriorPipeline"
}
+ },
+ "pipe_names": {
+ "prior": [
+ "PriorTransformer"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-2-2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unclip",
+ "scheduler"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -929,6 +2444,26 @@
"0": {
"diffusers": "LattePipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "latte-1"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "LatteTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
}
}
},
@@ -939,6 +2474,27 @@
"0": {
"diffusers": "LTXImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "ltx-video"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ltx-video"
+ ],
+ "transformer": [
+ "LTXVideoTransformer3DModel"
+ ]
}
}
},
@@ -949,6 +2505,27 @@
"0": {
"diffusers": "LTXConditionPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "ltx-video"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ltx-video-09"
+ ],
+ "transformer": [
+ "LTXVideoTransformer3DModel"
+ ]
}
}
},
@@ -957,9 +2534,24 @@
"repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
"pkg": {
"0": {
- "diffusers": "LuminaPipeline"
+ "precision": "ops.precision.bfloat.B16"
}
- }
+ },
+ "identifiers": [
+ "time_caption",
+ "feed_forward"
+ ],
+ "file_256": [
+ "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
+ ],
+ "layer_b3": [
+ "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
+ "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
+ ],
+ "layer_256": [
+ "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
+ "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
+ ]
}
},
"info.dit.lumina-image-2": {
@@ -969,6 +2561,41 @@
"0": {
"diffusers": "Lumina2Pipeline"
}
+ },
+ "file_256": [
+ "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
+ "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
+ ],
+ "layer_b3": [
+ "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
+ ],
+ "layer_256": [
+ "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
+ ],
+ "tasks": [
+ "Lumina2Pipeline"
+ ],
+ "pipe_names": {
+ "transformer": [
+ "Lumina2Transformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.gemma2",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "lumina-image-2"
+ ]
}
},
"illustrious-lumina-v3": {
@@ -987,6 +2614,34 @@
]
}
},
+ "info.dit.lucy-edit-dev": {
+ "*": {
+ "repo": "decart-ai/Lucy-Edit-Dev",
+ "pkg": {
+ "0": {
+ "diffusers": "LucyEditPipeline"
+ }
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "lucy-edit-dev"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
+ }
+ }
+ },
"info.dit.longcat-image": {
"*": {
"repo": "meituan-longcat/LongCat-Image",
@@ -994,6 +2649,30 @@
"0": {
"diffusers": "LongCatImagePipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "longcat-image"
+ ],
+ "text_processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1004,6 +2683,30 @@
"0": {
"diffusers": "LongCatImageEditPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "longcat-image-edit"
+ ],
+ "text_processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1014,6 +2717,27 @@
"0": {
"diffusers": "MochiPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "mochi-1"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "mochi-1"
+ ],
+ "transformer": [
+ "MochiTransformer3DModel"
+ ]
}
}
},
@@ -1022,9 +2746,21 @@
"repo": "ucsd-reach/musicldm",
"pkg": {
"0": {
- "diffusers": "MusicLDMPipeline"
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
}
- }
+ },
+ "file_256": [
+ "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ]
}
},
"info.dit.omnigen-v1": {
@@ -1034,6 +2770,22 @@
"0": {
"diffusers": "OmniGenPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "OmniGenTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "omnigen-v1"
+ ]
}
}
},
@@ -1044,6 +2796,30 @@
"0": {
"diffusers": "OvisImagePipeline"
}
+ },
+ "tasks": [
+ "OvisImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.stst.qwen3",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ovis-image"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1054,6 +2830,44 @@
"0": {
"diffusers": "VisualClozeGenerationPipeline"
}
+ },
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "visualclozepipeline-384"
+ ],
+ "text_encoder_2": [
+ "info.stst.t5",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "visualclozepipeline-384"
+ ],
+ "transformer": [
+ "FluxTransformer2DModel"
+ ]
+ }
+ }
+ },
+ "info.lora.pia-condition-adapter": {
+ "*": {
+ "repo": "openmmlab/PIA-condition-adapter",
+ "pkg": {
+ "0": {
+ "diffusers": "PIAPipeline"
+ }
}
}
},
@@ -1064,6 +2878,44 @@
"0": {
"diffusers": "PixArtAlphaPipeline"
}
+ },
+ "identifiers": [
+ "aspect_ratio",
+ "y_embedding",
+ "emb.resolution",
+ "caption_projection"
+ ],
+ "file_256": [
+ "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
+ ],
+ "layer_b3": [
+ "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
+ ],
+ "layer_256": [
+ "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
+ ],
+ "tasks": [
+ "PixArtAlphaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "pixart-xl-2-1024-ms"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "PixArtTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
}
}
},
@@ -1074,17 +2926,72 @@
"0": {
"diffusers": "PixArtSigmaPipeline"
}
- }
- }
+ },
+ "identifiers": [
+ "adaln_single",
+ "scale_shift_table"
+ ],
+ "file_256": [
+ "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
+ "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
+ ],
+ "layer_b3": [
+ "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
+ "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
+ ],
+ "layer_256": [
+ "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
+ "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
+ ],
+ "tasks": [
+ "PixArtAlphaPipeline",
+ "PixArtSigmaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "pixart-sigma-xl-2-1024-ms"
+ ],
+ "text_encoder": [
+ "info.stst.t5",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "PixArtTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ]
+ }
+ }
},
"info.dit.sana-1024px-bf16": {
"diffusers": {
"repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
"pkg": {
"0": {
- "diffusers": "SanaPipeline"
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 4.5,
+ "num_inference_steps": 20
+ },
+ "precision": "ops.precision.bfloat.B16"
}
- }
+ },
+ "file_256": [
+ "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
+ ],
+ "layer_b3": [
+ "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
+ ],
+ "layer_256": [
+ "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
+ ]
}
},
"info.controlnet.sana-1024px-controlnet": {
@@ -1104,6 +3011,31 @@
"0": {
"diffusers": "SanaSprintPipeline"
}
+ },
+ "tasks": [
+ "SanaPAGPipeline",
+ "SanaPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sana-sprint-1024px"
+ ],
+ "text_encoder": [
+ "info.stst.gemma2",
+ "*"
+ ],
+ "vae": [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ "transformer": [
+ "SanaTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.dpm",
+ "multistep"
+ ]
}
}
},
@@ -1114,6 +3046,33 @@
"0": {
"diffusers": "SanaImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "sana-video"
+ ],
+ "text_encoder": [
+ "info.stst.gemma2",
+ "*"
+ ],
+ "vae": [
+ [
+ "info.vae.dc",
+ "sana-1024px-bf16"
+ ],
+ [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ]
+ ],
+ "transformer": [
+ "SanaVideoTransformer3DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1122,7 +3081,12 @@
"repo": "openai/shap-e",
"pkg": {
"0": {
- "diffusers": "ShapEPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 64,
+ "size": 256,
+ "guidance_scale": 15
+ }
}
}
}
@@ -1132,7 +3096,12 @@
"repo": "stabilityai/stable-audio-open-1.0",
"pkg": {
"0": {
- "diffusers": "StableAudioPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_end_in_s": 10,
+ "num_waveforms_per_prompt": 3
+ }
}
}
}
@@ -1142,9 +3111,85 @@
"repo": "stabilityai/stable-cascade-prior",
"pkg": {
"0": {
- "diffusers": "StableCascadePriorPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "negative_prompt": "",
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 20,
+ "guidance_scale": 4.0,
+ "width": 1024,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
+ "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
+ "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
+ "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
+ "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
+ "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
+ ],
+ "layer_b3": [
+ "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
+ "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
+ "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
+ "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
+ "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
+ "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
+ ],
+ "layer_256": [
+ "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
+ "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
+ "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
+ "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
+ "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
+ "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
+ ],
+ "identifiers": [
+ "down_blocks.0.2.kv_mapper",
+ "previewer",
+ "backbone"
+ ]
+ },
+ "decoder": {
+ "pkg": {
+ "0": {
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 0.0,
+ "output_type": "pil",
+ "num_inference_steps": 10
+ },
+ "precision": "ops.precision.bfloat.B16"
}
- }
+ },
+ "file_256": [
+ "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
+ "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
+ "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
+ "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
+ "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
+ ],
+ "layer_b3": [
+ "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
+ "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
+ "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
+ "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
+ "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
+ ],
+ "layer_256": [
+ "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
+ "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
+ "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
+ "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
+ "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
+ ],
+ "identifiers": [
+ "0.2.channelwise",
+ "clip_mapper.bias",
+ ".12.self_attn.k_proj.weight"
+ ]
}
},
"info.dit.auraflow": {
@@ -1154,6 +3199,55 @@
"0": {
"diffusers": "AuraFlowPipeline"
}
+ },
+ "identifiers": [
+ [
+ 8192,
+ 3072
+ ],
+ "mlpX.c_fc2.weight",
+ "joint_transformer_blocks.2.ff_context.linear_2.weight"
+ ],
+ "file_256": [
+ "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
+ "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
+ "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
+ "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
+ ],
+ "layer_b3": [
+ "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
+ "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
+ "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
+ "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
+ ],
+ "layer_256": [
+ "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
+ "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
+ "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
+ "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
+ ],
+ "tasks": [
+ "AuraFlowPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "auraflow"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "transformer": [
+ "AuraFlowTransformer2DModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1162,9 +3256,27 @@
"repo": "stabilityai/stable-diffusion-3.5-medium",
"pkg": {
"0": {
- "diffusers": "StableDiffusion3Pipeline"
+ "precision": "ops.precision.float.F16"
}
- }
+ },
+ "identifiers": [
+ "model.diffusion_model.joint_blocks.",
+ "transformer_blocks.21.norm1_context.linear.weight",
+ "transformer_blocks.31.norm1_context.linear.weight",
+ "blocks.11.ff.net.2.weight"
+ ],
+ "file_256": [
+ "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
+ "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
+ ],
+ "layer_b3": [
+ "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
+ "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
+ ],
+ "layer_256": [
+ "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
+ "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
+ ]
},
"stable-diffusion-3-turbo": {
"repo": "tensorart/stable-diffusion-3.5-medium-turbo",
@@ -1226,6 +3338,27 @@
"0": {
"diffusers": "StableVideoDiffusionPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "unet": [
+ "UNetSpatioTemporalConditionModel"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1236,6 +3369,64 @@
"0": {
"diffusers": "StableDiffusionLDM3DPipeline"
}
+ },
+ "tasks": [
+ "StableDiffusion3ControlNetInpaintingPipeline",
+ "StableDiffusion3ControlNetPipeline",
+ "StableDiffusion3Img2ImgPipeline",
+ "StableDiffusion3InpaintPipeline",
+ "StableDiffusion3PAGImg2ImgPipeline",
+ "StableDiffusion3PAGPipeline",
+ "StableDiffusion3Pipeline",
+ "StableDiffusionControlNetImg2ImgPipeline",
+ "StableDiffusionControlNetInpaintPipeline",
+ "StableDiffusionControlNetPAGInpaintPipeline",
+ "StableDiffusionControlNetPAGPipeline",
+ "StableDiffusionControlNetPipeline",
+ "StableDiffusionImg2ImgPipeline",
+ "StableDiffusionInpaintPipeline",
+ "StableDiffusionPAGImg2ImgPipeline",
+ "StableDiffusionPAGInpaintPipeline",
+ "StableDiffusionPAGPipeline",
+ "StableDiffusionPipeline",
+ "StableDiffusionXLControlNetImg2ImgPipeline",
+ "StableDiffusionXLControlNetInpaintPipeline",
+ "StableDiffusionXLControlNetPAGImg2ImgPipeline",
+ "StableDiffusionXLControlNetPAGPipeline",
+ "StableDiffusionXLControlNetPipeline",
+ "StableDiffusionXLControlNetUnionImg2ImgPipeline",
+ "StableDiffusionXLControlNetUnionInpaintPipeline",
+ "StableDiffusionXLControlNetUnionPipeline",
+ "StableDiffusionXLImg2ImgPipeline",
+ "StableDiffusionXLInpaintPipeline",
+ "StableDiffusionXLPAGImg2ImgPipeline",
+ "StableDiffusionXLPAGInpaintPipeline",
+ "StableDiffusionXLPAGPipeline",
+ "StableDiffusionXLPipeline"
+ ],
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "ldm3d-4c"
+ ],
+ "scheduler": [
+ "ops.scheduler.karrasdiffusion",
+ "schedulers"
+ ],
+ "safety_checker": [
+ "StableDiffusionSafetyChecker"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1246,6 +3437,34 @@
"0": {
"diffusers": "I2VGenXLPipeline"
}
+ },
+ "pipe_names": {
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "i2vgen-xl"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "feature_extractor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "unet": [
+ "I2VGenXLUNet"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddim",
+ "scheduler"
+ ]
}
}
},
@@ -1256,6 +3475,27 @@
"0": {
"diffusers": "WuerstchenPriorPipeline"
}
+ },
+ "tasks": [
+ "WuerstchenCombinedPipeline",
+ "WuerstchenDecoderPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wuerstchen"
+ ],
+ "text_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "prior": [
+ "WuerstchenPrior"
+ ],
+ "scheduler": [
+ "ops.scheduler.ddpmwuerstchen",
+ "scheduler"
+ ]
}
}
},
@@ -1264,9 +3504,43 @@
"repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
"pkg": {
"0": {
- "diffusers": "WanPipeline"
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
}
- }
+ },
+ "file_256": [
+ "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
+ "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
+ "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
+ "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
+ "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
+ "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
+ "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
+ "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
+ ],
+ "layer_b3": [
+ "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
+ "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
+ "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
+ "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
+ "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
+ "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
+ "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
+ ],
+ "layer_256": [
+ "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
+ "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
+ "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
+ "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
+ "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
+ "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
+ "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
+ ]
}
},
"info.dit.wan-animate": {
@@ -1276,6 +3550,40 @@
"0": {
"diffusers": "WanAnimatePipeline"
}
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan-animate"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "transformer": [
+ "WanAnimateTransformer3DModel"
+ ]
}
}
},
@@ -1286,6 +3594,68 @@
"0": {
"diffusers": "WanImageToVideoPipeline"
}
+ },
+ "file_256": [
+ "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
+ "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
+ "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
+ "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
+ "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
+ "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
+ "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
+ "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
+ "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
+ ],
+ "layer_b3": [
+ "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
+ "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
+ "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
+ "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
+ "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
+ ],
+ "layer_256": [
+ "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
+ "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
+ "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
+ "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
+ "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
+ ],
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan2-i2v-480p"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.wan",
+ "wan2-i2v-480p"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "transformer": [
+ "WanTransformer3DModel"
+ ],
+ "transformer_2": [
+ "WanTransformer3DModel"
+ ]
}
}
},
@@ -1296,6 +3666,54 @@
"0": {
"diffusers": "WanVACEPipeline"
}
+ },
+ "file_256": [
+ "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
+ "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
+ "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
+ "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
+ "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
+ ],
+ "layer_b3": [
+ "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
+ "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
+ "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
+ "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
+ ],
+ "layer_256": [
+ "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
+ "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
+ "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
+ "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
+ ],
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan21-vace"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "transformer": [
+ "WanVACETransformer3DModel"
+ ],
+ "transformer_2": [
+ "WanVACETransformer3DModel"
+ ]
}
}
},
@@ -1304,8 +3722,38 @@
"repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
"pkg": {
"0": {
- "diffusers": "WanVideoToVideoPipeline"
+ "diffusers": "WanPipeline",
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
}
+ },
+ "tasks": [
+ "WanImageToVideoPipeline",
+ "WanPipeline",
+ "WanVideoToVideoPipeline"
+ ],
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "wan21-t2v"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1316,6 +3764,35 @@
"0": {
"diffusers": "Kandinsky5T2VPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2v-lite-sft-5s"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2v-lite-sft-5s"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1326,6 +3803,34 @@
"0": {
"diffusers": "Kandinsky5I2IPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2i-lite-sft"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2i-lite-sft"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1336,6 +3841,35 @@
"0": {
"diffusers": "Kandinsky5I2VPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "hunyuanvideo-i2v"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2v-sft-5s"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-i2v-sft-5s"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1346,6 +3880,34 @@
"0": {
"diffusers": "Kandinsky5T2IPipeline"
}
+ },
+ "pipe_names": {
+ "transformer": [
+ "Kandinsky5Transformer3DModel"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2i-lite-sft"
+ ],
+ "text_encoder_2": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "tokenizer_2": [
+ "info.encoder.tokenizer",
+ "kandinsky-5-t2i-lite-sft"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1356,6 +3918,41 @@
"0": {
"diffusers": "ZImageOmniPipeline"
}
+ },
+ "tasks": [
+ "ZImageControlNetInpaintPipeline",
+ "ZImageControlNetPipeline",
+ "ZImageImg2ImgPipeline",
+ "ZImageOmniPipeline",
+ "ZImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "AutoencoderKL"
+ ],
+ "text_encoder": [
+ "PreTrainedModel"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "z-image-turbo"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "siglip": [
+ "info.vit.siglip2-patch16-224",
+ "*"
+ ],
+ "siglip_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1376,6 +3973,27 @@
"0": {
"diffusers": "SkyReelsV2Pipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-t2v-720p"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
}
}
},
@@ -1386,6 +4004,27 @@
"0": {
"diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-df-720p"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
}
}
},
@@ -1396,6 +4035,34 @@
"0": {
"diffusers": "SkyReelsV2ImageToVideoPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "skyreels-v2-i2v-720p"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "image_processor": [
+ "CLIPProcessor"
+ ],
+ "transformer": [
+ "SkyReelsV2Transformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.unipc",
+ "multistep"
+ ]
}
}
},
@@ -1406,6 +4073,46 @@
"0": {
"diffusers": "QwenImageInpaintPipeline"
}
+ },
+ "file_256": [
+ "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
+ ],
+ "layer_b3": [
+ "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
+ ],
+ "layer_256": [
+ "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
+ ],
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1436,6 +4143,40 @@
"0": {
"diffusers": "QwenImageEditInpaintPipeline"
}
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-edit"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1446,6 +4187,40 @@
"0": {
"diffusers": "QwenImageEditPlusPipeline"
}
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-edit-2509"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1456,6 +4231,40 @@
"0": {
"diffusers": "QwenImageLayeredPipeline"
}
+ },
+ "tasks": [
+ "QwenImageControlNetPipeline",
+ "QwenImageEditInpaintPipeline",
+ "QwenImageEditPipeline",
+ "QwenImageEditPlusPipeline",
+ "QwenImageImg2ImgPipeline",
+ "QwenImageInpaintPipeline",
+ "QwenImagePipeline"
+ ],
+ "pipe_names": {
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "qwen-image"
+ ],
+ "text_encoder": [
+ "info.vit.qwen2-vl",
+ "*"
+ ],
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "qwen-image-layered"
+ ],
+ "processor": [
+ "Qwen2VLProcessor"
+ ],
+ "transformer": [
+ "info.dit.flux1-schnell",
+ "*"
+ ]
}
}
},
@@ -1466,6 +4275,35 @@
"0": {
"diffusers": "ChronoEditPipeline"
}
+ },
+ "pipe_names": {
+ "tokenizer": [
+ "info.encoder.tokenizer",
+ "chronoedit"
+ ],
+ "text_encoder": [
+ "info.stst.mt5",
+ "*"
+ ],
+ "image_encoder": [
+ "info.vit.clip-vit-patch32",
+ "*"
+ ],
+ "image_processor": [
+ "info.dit.flux1-schnell",
+ "*"
+ ],
+ "transformer": [
+ "ChronoEditTransformer3DModel"
+ ],
+ "vae": [
+ "info.vae.kl",
+ "audioldm-s-v2"
+ ],
+ "scheduler": [
+ "ops.scheduler.euler",
+ "discrete"
+ ]
}
}
},
@@ -1474,9 +4312,48 @@
"repo": "Kwai-Kolors/Kolors-diffusers",
"pkg": {
"0": {
- "diffusers": "KolorsPipeline"
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 5.0,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
}
- }
+ },
+ "file_256": [
+ "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
+ ],
+ "layer_b3": [
+ "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
+ ],
+ "layer_256": [
+ "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
+ ],
+ "identifiers": [
+ ".DenseReluDense.wi.weight",
+ "encoder_hid_proj.weight"
+ ],
+ "pipe_names": {}
+ }
+ },
+ "info.moe.trinity": {
+ "*": {
+ "repo": "arcee-ai/Trinity-Mini",
+ "pkg": {
+ "0": {
+ "transformers": "AfmoeModel"
+ }
+ },
+ "tasks": [
+ "AfmoeForCausalLM",
+ "AfmoeModel",
+ "AfmoePreTrainedModel"
+ ]
}
},
"info.encoder.tokenizer": {
@@ -1501,6 +4378,13 @@
}
}
},
+ "afm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"aria": {
"pkg": {
"0": {
@@ -1571,6 +4455,13 @@
}
}
},
+ "bitnet-b18-4t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"blenderbot": {
"pkg": {
"0": {
@@ -1599,6 +4490,13 @@
}
}
},
+ "blt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"bridgetower": {
"pkg": {
"0": {
@@ -1627,6 +4525,13 @@
}
}
},
+ "chameleon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"chinese-clip-vit-patch16": {
"pkg": {
"0": {
@@ -1662,6 +4567,13 @@
}
}
},
+ "llama-2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"codegen-mono": {
"pkg": {
"0": {
@@ -1669,6 +4581,13 @@
}
}
},
+ "c4ai-command-r-v01": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
+ }
+ }
+ },
"conv-bert": {
"pkg": {
"0": {
@@ -1676,6 +4595,20 @@
}
}
},
+ "cpm-ant": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.cpmant.tokenization_cpmant.CpmAntTokenizer"
+ }
+ }
+ },
+ "csm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"ctrl": {
"pkg": {
"0": {
@@ -1697,6 +4630,13 @@
}
}
},
+ "dbrx": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
"deberta": {
"pkg": {
"0": {
@@ -1711,80 +4651,164 @@
}
}
},
- "distilbert-uncased": {
+ "deepseek-v2-lite": {
"pkg": {
"0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "dpr-ctx-encoder-single-nq": {
+ "deepseek-v3": {
"pkg": {
"0": {
- "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "electra-discriminator": {
+ "deepseek-vl-chat": {
"pkg": {
"0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "ernie-3-zh": {
+ "dia": {
"pkg": {
"0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ "transformers": "transformers.models.dia.tokenization_dia.DiaTokenizer"
}
}
},
- "ernie-4-vl-a-pt": {
+ "diffllama-handcut": {
"pkg": {
"0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
},
- "esm": {
+ "distilbert-uncased": {
"pkg": {
"0": {
- "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "falcon-mamba": {
+ "dpr-ctx-encoder-single-nq": {
"pkg": {
"0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
}
}
},
- "flaubert-uncased": {
+ "electra-discriminator": {
"pkg": {
"0": {
- "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "florence-2": {
+ "emu3-chat-hf": {
"pkg": {
"0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
}
}
},
- "fnet": {
+ "ernie-3-zh": {
"pkg": {
"0": {
- "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
}
}
},
- "wmt19-en-ru": {
+ "ernie-45-pt": {
"pkg": {
"0": {
- "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "ernie-4-a-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "ernie-4-vl-a-pt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "esm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
+ }
+ }
+ },
+ "exaone-4": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "falcon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "falcon-mamba": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "flaubert-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
+ }
+ }
+ },
+ "flava": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
+ }
+ }
+ },
+ "flexolmo-7x-1t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "florence-2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
+ "fnet": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
+ }
+ }
+ },
+ "wmt19-en-ru": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
}
}
},
@@ -1795,6 +4819,48 @@
}
}
},
+ "fuyu": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "gemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma-3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma3-text": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
+ "gemma-3n-e": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
"git": {
"pkg": {
"0": {
@@ -1802,6 +4868,27 @@
}
}
},
+ "glm-4-chat": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4-0414": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "glm-4-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"glm-4v-thinking": {
"pkg": {
"0": {
@@ -1823,6 +4910,13 @@
}
}
},
+ "got-ocr-2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"gpt2": {
"pkg": {
"0": {
@@ -1844,6 +4938,27 @@
}
}
},
+ "gpt-neox": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "gpt-neox-japanese": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer"
+ }
+ }
+ },
+ "gpt-oss": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"gpt-j": {
"pkg": {
"0": {
@@ -1851,6 +4966,34 @@
}
}
},
+ "granite": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "powermoe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "granite-4-h": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "moe-active-shared-experts": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
"grounding-dino": {
"pkg": {
"0": {
@@ -1865,6 +5008,13 @@
}
}
},
+ "helium": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"hubert-ls960": {
"pkg": {
"0": {
@@ -1914,6 +5064,13 @@
}
}
},
+ "jais-2-chat": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
"jamba-v0": {
"pkg": {
"0": {
@@ -1928,6 +5085,13 @@
}
}
},
+ "jetmoe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"kosmos-2-patch14-224": {
"pkg": {
"0": {
@@ -2026,6 +5190,13 @@
}
}
},
+ "longformer-4096": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
+ }
+ }
+ },
"long-t5-local": {
"pkg": {
"0": {
@@ -2103,6 +5274,27 @@
}
}
},
+ "max-text-01-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "stral-3-2512": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "mistral-v0": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"mistral-3-2503": {
"pkg": {
"0": {
@@ -2110,6 +5302,13 @@
}
}
},
+ "mixtral-8x": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
"llama-3-vision": {
"pkg": {
"0": {
@@ -2138,6 +5337,20 @@
}
}
},
+ "moonshine": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
+ "hf-moshiko": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"mpnet": {
"pkg": {
"0": {
@@ -2187,6 +5400,13 @@
}
}
},
+ "nemotron-3-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"nllb-moe": {
"pkg": {
"0": {
@@ -2201,31 +5421,66 @@
}
}
},
- "omdet-turbo-swin-hf": {
+ "olmo-hf": {
"pkg": {
"0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
}
}
},
- "openai-gpt": {
+ "olmo2-1124-hf": {
"pkg": {
"0": {
- "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
}
}
},
- "opt": {
+ "olmo-3-0725": {
"pkg": {
"0": {
"transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
}
}
},
- "ovis2-hf": {
+ "olmoe-0924": {
"pkg": {
"0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "omdet-turbo-swin-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "oneformer-ade-swin": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
+ }
+ }
+ },
+ "openai-gpt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
+ }
+ }
+ },
+ "opt": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "ovis2-hf": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
}
}
},
@@ -2271,6 +5526,41 @@
}
}
},
+ "persimmon": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "phi-1": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
+ "phi-3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "phi-3-moe": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
+ },
+ "pixtral": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"plbart": {
"pkg": {
"0": {
@@ -2278,6 +5568,20 @@
}
}
},
+ "phetnet-uncased": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.prophetnet.tokenization_prophetnet.ProphetNetTokenizer"
+ }
+ }
+ },
+ "qwen2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
"qwen2-vl": {
"pkg": {
"0": {
@@ -2285,6 +5589,34 @@
}
}
},
+ "qwen15-moe-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
+ "qwen3-next-a": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
+ }
+ }
+ },
"qwen3-vl": {
"pkg": {
"0": {
@@ -2299,6 +5631,13 @@
}
}
},
+ "recurrentgemma": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
"reformer-crime-and-punishment": {
"pkg": {
"0": {
@@ -2376,6 +5715,13 @@
}
}
},
+ "smollm3": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
+ }
+ }
+ },
"s2t-librispeech-asr": {
"pkg": {
"0": {
@@ -2404,6 +5750,20 @@
}
}
},
+ "stablelm-4e1t": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
+ }
+ }
+ },
+ "starcoder2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
+ }
+ }
+ },
"switch-8": {
"pkg": {
"0": {
@@ -2418,6 +5778,13 @@
}
}
},
+ "t5gemma-prefixlm": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
+ }
+ }
+ },
"tapas-finetuned-sqa": {
"pkg": {
"0": {
@@ -2585,6 +5952,13 @@
"transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
}
}
+ },
+ "zamba2": {
+ "pkg": {
+ "0": {
+ "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
+ }
+ }
}
},
"info.vit.aimv2-patch14-224-lit": {
@@ -2594,7 +5968,29 @@
"0": {
"transformers": "Aimv2Model"
}
- }
+ },
+ "tasks": [
+ "Aimv2VisionModel",
+ "Aimv2Model",
+ "Aimv2PreTrainedModel",
+ "Aimv2TextModel"
+ ]
+ }
+ },
+ "info.vit.aimv2-patch14-224": {
+ "*": {
+ "repo": "apple/aimv2-large-patch14-224",
+ "pkg": {
+ "0": {
+ "transformers": "Aimv2VisionModel"
+ }
+ },
+ "tasks": [
+ "Aimv2VisionModel",
+ "Aimv2Model",
+ "Aimv2PreTrainedModel",
+ "Aimv2TextModel"
+ ]
}
},
"info.art.albert-xx-v2": {
@@ -2604,7 +6000,17 @@
"0": {
"transformers": "AlbertModel"
}
- }
+ },
+ "tasks": [
+ "AlbertPreTrainedModel",
+ "AlbertModel",
+ "AlbertForPreTraining",
+ "AlbertForMaskedLM",
+ "AlbertForSequenceClassification",
+ "AlbertForTokenClassification",
+ "AlbertForQuestionAnswering",
+ "AlbertForMultipleChoice"
+ ]
}
},
"info.vit.align": {
@@ -2614,7 +6020,13 @@
"0": {
"transformers": "AlignModel"
}
- }
+ },
+ "tasks": [
+ "AlignPreTrainedModel",
+ "AlignTextModel",
+ "AlignVisionModel",
+ "AlignModel"
+ ]
}
},
"info.vit.altclip": {
@@ -2624,7 +6036,47 @@
"0": {
"transformers": "AltCLIPModel"
}
- }
+ },
+ "tasks": [
+ "AltCLIPPreTrainedModel",
+ "AltCLIPVisionModel",
+ "AltCLIPTextModel",
+ "AltCLIPModel"
+ ]
+ }
+ },
+ "info.stst.apertus": {
+ "*": {
+ "repo": "swiss-ai/Apertus-8B",
+ "pkg": {
+ "0": {
+ "transformers": "ApertusModel"
+ }
+ },
+ "tasks": [
+ "ApertusModel",
+ "ApertusForCausalLM",
+ "ApertusForTokenClassification",
+ "ApertusPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.afm": {
+ "*": {
+ "repo": "arcee-ai/AFM-4.5B",
+ "pkg": {
+ "0": {
+ "transformers": "ArceeModel"
+ }
+ },
+ "tasks": [
+ "ArceeForCausalLM",
+ "ArceeForQuestionAnswering",
+ "ArceeForSequenceClassification",
+ "ArceeForTokenClassification",
+ "ArceeModel",
+ "ArceePreTrainedModel"
+ ]
}
},
"info.vit.aria": {
@@ -2634,7 +6086,15 @@
"0": {
"transformers": "AriaModel"
}
- }
+ },
+ "tasks": [
+ "AriaForConditionalGeneration",
+ "AriaPreTrainedModel",
+ "AriaTextPreTrainedModel",
+ "AriaTextModel",
+ "AriaModel",
+ "AriaTextForCausalLM"
+ ]
}
},
"info.vit.ast-finetuned-audioset-10-10-0593": {
@@ -2644,7 +6104,12 @@
"0": {
"transformers": "ASTModel"
}
- }
+ },
+ "tasks": [
+ "ASTForAudioClassification",
+ "ASTModel",
+ "ASTPreTrainedModel"
+ ]
}
},
"info.stst.audio-flamingo-3-hf": {
@@ -2654,7 +6119,12 @@
"0": {
"transformers": "AudioFlamingo3ForConditionalGeneration"
}
- }
+ },
+ "tasks": [
+ "AudioFlamingo3ForConditionalGeneration",
+ "AudioFlamingo3PreTrainedModel",
+ "AudioFlamingo3Encoder"
+ ]
}
},
"info.aet.audio-flamingo-3-hf": {
@@ -2664,7 +6134,12 @@
"0": {
"transformers": "AudioFlamingo3Encoder"
}
- }
+ },
+ "tasks": [
+ "AudioFlamingo3ForConditionalGeneration",
+ "AudioFlamingo3PreTrainedModel",
+ "AudioFlamingo3Encoder"
+ ]
}
},
"info.stst.autoformer-tourism-monthly": {
@@ -2674,7 +6149,12 @@
"0": {
"transformers": "AutoformerModel"
}
- }
+ },
+ "tasks": [
+ "AutoformerForPrediction",
+ "AutoformerModel",
+ "AutoformerPreTrainedModel"
+ ]
}
},
"info.vit.aya-vision": {
@@ -2684,7 +6164,27 @@
"0": {
"transformers": "AyaVisionModel"
}
- }
+ },
+ "tasks": [
+ "AyaVisionForConditionalGeneration",
+ "AyaVisionPreTrainedModel",
+ "AyaVisionModel"
+ ]
+ }
+ },
+ "info.ssm.bamba-t-hf": {
+ "*": {
+ "repo": "ibm-fms/Bamba-9.8b-2.2T-hf",
+ "pkg": {
+ "0": {
+ "transformers": "BambaModel"
+ }
+ },
+ "tasks": [
+ "BambaModel",
+ "BambaForCausalLM",
+ "BambaPreTrainedModel"
+ ]
}
},
"info.art.bark": {
@@ -2694,7 +6194,15 @@
"0": {
"transformers": "BarkModel"
}
- }
+ },
+ "tasks": [
+ "BarkFineModel",
+ "BarkSemanticModel",
+ "BarkCoarseModel",
+ "BarkModel",
+ "BarkPreTrainedModel",
+ "BarkCausalModel"
+ ]
}
},
"info.stst.bart": {
@@ -2704,7 +6212,17 @@
"0": {
"transformers": "BartModel"
}
- }
+ },
+ "tasks": [
+ "BartForCausalLM",
+ "BartForConditionalGeneration",
+ "BartForQuestionAnswering",
+ "BartForSequenceClassification",
+ "BartModel",
+ "BartPreTrainedModel",
+ "BartPretrainedModel",
+ "PretrainedBartModel"
+ ]
}
},
"info.vit.beit-patch16-224-pt": {
@@ -2714,7 +6232,15 @@
"0": {
"transformers": "BeitModel"
}
- }
+ },
+ "tasks": [
+ "BeitForImageClassification",
+ "BeitForMaskedImageModeling",
+ "BeitForSemanticSegmentation",
+ "BeitModel",
+ "BeitPreTrainedModel",
+ "BeitBackbone"
+ ]
}
},
"info.art.bert-uncased": {
@@ -2724,7 +6250,29 @@
"0": {
"transformers": "BertModel"
}
- }
+ },
+ "file_256": [
+ "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
+ ],
+ "layer_b3": [
+ "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
+ ],
+ "layer_256": [
+ "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
+ ],
+ "tasks": [
+ "BertForMaskedLM",
+ "BertForMultipleChoice",
+ "BertForNextSentencePrediction",
+ "BertForPreTraining",
+ "BertForQuestionAnswering",
+ "BertForSequenceClassification",
+ "BertForTokenClassification",
+ "BertLayer",
+ "BertLMHeadModel",
+ "BertModel",
+ "BertPreTrainedModel"
+ ]
}
},
"info.art.bert-for-seq-generation-l-24-bbc-encoder": {
@@ -2734,7 +6282,12 @@
"0": {
"transformers": "BertGenerationEncoder"
}
- }
+ },
+ "tasks": [
+ "BertGenerationDecoder",
+ "BertGenerationEncoder",
+ "BertGenerationPreTrainedModel"
+ ]
}
},
"info.art.bigbird-roberta": {
@@ -2744,7 +6297,19 @@
"0": {
"transformers": "BigBirdModel"
}
- }
+ },
+ "tasks": [
+ "BigBirdForCausalLM",
+ "BigBirdForMaskedLM",
+ "BigBirdForMultipleChoice",
+ "BigBirdForPreTraining",
+ "BigBirdForQuestionAnswering",
+ "BigBirdForSequenceClassification",
+ "BigBirdForTokenClassification",
+ "BigBirdLayer",
+ "BigBirdModel",
+ "BigBirdPreTrainedModel"
+ ]
}
},
"info.stst.bigbird-pegasus-arxiv": {
@@ -2754,7 +6319,15 @@
"0": {
"transformers": "BigBirdPegasusModel"
}
- }
+ },
+ "tasks": [
+ "BigBirdPegasusForCausalLM",
+ "BigBirdPegasusForConditionalGeneration",
+ "BigBirdPegasusForQuestionAnswering",
+ "BigBirdPegasusForSequenceClassification",
+ "BigBirdPegasusModel",
+ "BigBirdPegasusPreTrainedModel"
+ ]
}
},
"info.art.biogpt": {
@@ -2764,7 +6337,14 @@
"0": {
"transformers": "BioGptModel"
}
- }
+ },
+ "tasks": [
+ "BioGptForCausalLM",
+ "BioGptForTokenClassification",
+ "BioGptForSequenceClassification",
+ "BioGptModel",
+ "BioGptPreTrainedModel"
+ ]
}
},
"info.vit.bit-50": {
@@ -2774,7 +6354,28 @@
"0": {
"transformers": "BitModel"
}
- }
+ },
+ "tasks": [
+ "BitForImageClassification",
+ "BitModel",
+ "BitPreTrainedModel",
+ "BitBackbone"
+ ]
+ }
+ },
+ "info.stst.bitnet-b18-4t": {
+ "*": {
+ "repo": "microsoft/bitnet-b1.58-2B-4T",
+ "pkg": {
+ "0": {
+ "transformers": "BitNetModel"
+ }
+ },
+ "tasks": [
+ "BitNetForCausalLM",
+ "BitNetModel",
+ "BitNetPreTrainedModel"
+ ]
}
},
"info.stst.blenderbot": {
@@ -2784,7 +6385,13 @@
"0": {
"transformers": "BlenderbotModel"
}
- }
+ },
+ "tasks": [
+ "BlenderbotForCausalLM",
+ "BlenderbotForConditionalGeneration",
+ "BlenderbotModel",
+ "BlenderbotPreTrainedModel"
+ ]
}
},
"info.vit.blip-vqa": {
@@ -2794,7 +6401,16 @@
"0": {
"transformers": "BlipModel"
}
- }
+ },
+ "tasks": [
+ "BlipModel",
+ "BlipPreTrainedModel",
+ "BlipForConditionalGeneration",
+ "BlipForQuestionAnswering",
+ "BlipVisionModel",
+ "BlipTextModel",
+ "BlipForImageTextRetrieval"
+ ]
}
},
"info.vit.blip2-opt": {
@@ -2804,7 +6420,17 @@
"0": {
"transformers": "Blip2Model"
}
- }
+ },
+ "tasks": [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection"
+ ]
}
},
"info.stst.blip2-opt": {
@@ -2814,7 +6440,17 @@
"0": {
"transformers": "Blip2QFormerModel"
}
- }
+ },
+ "tasks": [
+ "Blip2Model",
+ "Blip2VisionModelWithProjection",
+ "Blip2QFormerModel",
+ "Blip2PreTrainedModel",
+ "Blip2ForConditionalGeneration",
+ "Blip2ForImageTextRetrieval",
+ "Blip2VisionModel",
+ "Blip2TextModelWithProjection"
+ ]
}
},
"info.art.bloom": {
@@ -2824,17 +6460,48 @@
"0": {
"transformers": "BloomModel"
}
- }
+ },
+ "tasks": [
+ "BloomForCausalLM",
+ "BloomModel",
+ "BloomPreTrainedModel",
+ "BloomForSequenceClassification",
+ "BloomForTokenClassification",
+ "BloomForQuestionAnswering"
+ ]
}
},
- "info.vit.bridgetower": {
+ "info.vit.blt": {
"*": {
- "repo": "BridgeTower/bridgetower-base",
+ "repo": "facebook/blt",
"pkg": {
"0": {
- "transformers": "BridgeTowerModel"
+ "transformers": "BltModel"
}
- }
+ },
+ "tasks": [
+ "BltPreTrainedModel",
+ "BltModel",
+ "BltPatcher",
+ "BltForCausalLM"
+ ]
+ }
+ },
+ "info.vit.bridgetower": {
+ "*": {
+ "repo": "BridgeTower/bridgetower-base",
+ "pkg": {
+ "0": {
+ "transformers": "BridgeTowerModel"
+ }
+ },
+ "tasks": [
+ "BridgeTowerForContrastiveLearning",
+ "BridgeTowerForImageAndTextRetrieval",
+ "BridgeTowerForMaskedLM",
+ "BridgeTowerModel",
+ "BridgeTowerPreTrainedModel"
+ ]
}
},
"info.art.bros-uncased": {
@@ -2844,7 +6511,14 @@
"0": {
"transformers": "BrosModel"
}
- }
+ },
+ "tasks": [
+ "BrosPreTrainedModel",
+ "BrosModel",
+ "BrosForTokenClassification",
+ "BrosSpadeEEForTokenClassification",
+ "BrosSpadeELForTokenClassification"
+ ]
}
},
"info.art.camembert": {
@@ -2854,7 +6528,17 @@
"0": {
"transformers": "CamembertModel"
}
- }
+ },
+ "tasks": [
+ "CamembertForCausalLM",
+ "CamembertForMaskedLM",
+ "CamembertForMultipleChoice",
+ "CamembertForQuestionAnswering",
+ "CamembertForSequenceClassification",
+ "CamembertForTokenClassification",
+ "CamembertModel",
+ "CamembertPreTrainedModel"
+ ]
}
},
"info.art.canine-s": {
@@ -2864,7 +6548,32 @@
"0": {
"transformers": "CanineModel"
}
- }
+ },
+ "tasks": [
+ "CanineForMultipleChoice",
+ "CanineForQuestionAnswering",
+ "CanineForSequenceClassification",
+ "CanineForTokenClassification",
+ "CanineLayer",
+ "CanineModel",
+ "CaninePreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.chameleon": {
+ "*": {
+ "repo": "meta/chameleon-7B",
+ "pkg": {
+ "0": {
+ "transformers": "ChameleonModel"
+ }
+ },
+ "tasks": [
+ "ChameleonForConditionalGeneration",
+ "ChameleonModel",
+ "ChameleonPreTrainedModel",
+ "ChameleonVQVAE"
+ ]
}
},
"info.vit.chinese-clip-vit-patch16": {
@@ -2874,7 +6583,13 @@
"0": {
"transformers": "ChineseCLIPModel"
}
- }
+ },
+ "tasks": [
+ "ChineseCLIPModel",
+ "ChineseCLIPPreTrainedModel",
+ "ChineseCLIPTextModel",
+ "ChineseCLIPVisionModel"
+ ]
}
},
"info.vit.clap-htsat-fused": {
@@ -2884,7 +6599,27 @@
"0": {
"transformers": "ClapModel"
}
- }
+ },
+ "file_256": [
+ "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
+ "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
+ ],
+ "layer_b3": [
+ "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
+ "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
+ ],
+ "layer_256": [
+ "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
+ "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
+ ],
+ "tasks": [
+ "ClapModel",
+ "ClapPreTrainedModel",
+ "ClapTextModel",
+ "ClapTextModelWithProjection",
+ "ClapAudioModel",
+ "ClapAudioModelWithProjection"
+ ]
}
},
"info.vit.clip-vit-patch32": {
@@ -2894,7 +6629,16 @@
"0": {
"transformers": "CLIPModel"
}
- }
+ },
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
+ ]
}
},
"info.vit.clipseg-rd64": {
@@ -2904,7 +6648,14 @@
"0": {
"transformers": "CLIPSegModel"
}
- }
+ },
+ "tasks": [
+ "CLIPSegModel",
+ "CLIPSegPreTrainedModel",
+ "CLIPSegTextModel",
+ "CLIPSegVisionModel",
+ "CLIPSegForImageSegmentation"
+ ]
}
},
"info.vit.clvp-dev": {
@@ -2914,7 +6665,33 @@
"0": {
"transformers": "ClvpModelForConditionalGeneration"
}
- }
+ },
+ "tasks": [
+ "ClvpModelForConditionalGeneration",
+ "ClvpForCausalLM",
+ "ClvpModel",
+ "ClvpPreTrainedModel",
+ "ClvpEncoder",
+ "ClvpDecoder"
+ ]
+ }
+ },
+ "info.stst.llama-2-hf": {
+ "*": {
+ "repo": "meta-llama/Llama-2-7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlamaModel"
+ }
+ },
+ "tasks": [
+ "LlamaForCausalLM",
+ "LlamaModel",
+ "LlamaPreTrainedModel",
+ "LlamaForSequenceClassification",
+ "LlamaForQuestionAnswering",
+ "LlamaForTokenClassification"
+ ]
}
},
"info.art.codegen-mono": {
@@ -2924,7 +6701,27 @@
"0": {
"transformers": "CodeGenModel"
}
- }
+ },
+ "tasks": [
+ "CodeGenForCausalLM",
+ "CodeGenModel",
+ "CodeGenPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.c4ai-command-r-v01": {
+ "*": {
+ "repo": "CohereForAI/c4ai-command-r-v01",
+ "pkg": {
+ "0": {
+ "transformers": "CohereModel"
+ }
+ },
+ "tasks": [
+ "CohereForCausalLM",
+ "CohereModel",
+ "CoherePreTrainedModel"
+ ]
}
},
"info.vit.command-a-vision-07-2025": {
@@ -2934,7 +6731,12 @@
"0": {
"transformers": "Cohere2VisionModel"
}
- }
+ },
+ "tasks": [
+ "Cohere2VisionForConditionalGeneration",
+ "Cohere2VisionPreTrainedModel",
+ "Cohere2VisionModel"
+ ]
}
},
"info.detr.conditional-detr-resnet-50": {
@@ -2944,7 +6746,13 @@
"0": {
"transformers": "ConditionalDetrModel"
}
- }
+ },
+ "tasks": [
+ "ConditionalDetrForObjectDetection",
+ "ConditionalDetrForSegmentation",
+ "ConditionalDetrModel",
+ "ConditionalDetrPreTrainedModel"
+ ]
}
},
"info.art.conv-bert": {
@@ -2954,7 +6762,17 @@
"0": {
"transformers": "ConvBertModel"
}
- }
+ },
+ "tasks": [
+ "ConvBertForMaskedLM",
+ "ConvBertForMultipleChoice",
+ "ConvBertForQuestionAnswering",
+ "ConvBertForSequenceClassification",
+ "ConvBertForTokenClassification",
+ "ConvBertLayer",
+ "ConvBertModel",
+ "ConvBertPreTrainedModel"
+ ]
}
},
"info.vit.convnext-224": {
@@ -2964,7 +6782,13 @@
"0": {
"transformers": "ConvNextModel"
}
- }
+ },
+ "tasks": [
+ "ConvNextForImageClassification",
+ "ConvNextModel",
+ "ConvNextPreTrainedModel",
+ "ConvNextBackbone"
+ ]
}
},
"info.vit.convnextv2-224": {
@@ -2974,7 +6798,45 @@
"0": {
"transformers": "ConvNextV2Model"
}
- }
+ },
+ "tasks": [
+ "ConvNextV2ForImageClassification",
+ "ConvNextV2Model",
+ "ConvNextV2PreTrainedModel",
+ "ConvNextV2Backbone"
+ ]
+ }
+ },
+ "info.stst.cpm-ant": {
+ "*": {
+ "repo": "openbmb/cpm-ant-10b",
+ "pkg": {
+ "0": {
+ "transformers": "CpmAntModel"
+ }
+ },
+ "tasks": [
+ "CpmAntForCausalLM",
+ "CpmAntModel",
+ "CpmAntPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.csm": {
+ "*": {
+ "repo": "sesame/csm-1b",
+ "pkg": {
+ "0": {
+ "transformers": "CsmForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "CsmPreTrainedModel",
+ "CsmBackboneModel",
+ "CsmDepthDecoderModel",
+ "CsmDepthDecoderForCausalLM",
+ "CsmForConditionalGeneration"
+ ]
}
},
"info.art.ctrl": {
@@ -2984,7 +6846,13 @@
"0": {
"transformers": "CTRLModel"
}
- }
+ },
+ "tasks": [
+ "CTRLForSequenceClassification",
+ "CTRLLMHeadModel",
+ "CTRLModel",
+ "CTRLPreTrainedModel"
+ ]
}
},
"info.vit.cvt-13": {
@@ -2994,7 +6862,12 @@
"0": {
"transformers": "CvtModel"
}
- }
+ },
+ "tasks": [
+ "CvtForImageClassification",
+ "CvtModel",
+ "CvtPreTrainedModel"
+ ]
}
},
"info.art.cwm": {
@@ -3004,7 +6877,12 @@
"0": {
"transformers": "CwmModel"
}
- }
+ },
+ "tasks": [
+ "CwmPreTrainedModel",
+ "CwmModel",
+ "CwmForCausalLM"
+ ]
}
},
"info.detr.dfine-x-coco": {
@@ -3014,7 +6892,12 @@
"0": {
"transformers": "DFineModel"
}
- }
+ },
+ "tasks": [
+ "DFineModel",
+ "DFinePreTrainedModel",
+ "DFineForObjectDetection"
+ ]
}
},
"info.detr.dab-detr": {
@@ -3024,7 +6907,12 @@
"0": {
"transformers": "DabDetrModel"
}
- }
+ },
+ "tasks": [
+ "DabDetrForObjectDetection",
+ "DabDetrModel",
+ "DabDetrPreTrainedModel"
+ ]
}
},
"info.gan.dac": {
@@ -3034,7 +6922,11 @@
"0": {
"transformers": "DacModel"
}
- }
+ },
+ "tasks": [
+ "DacModel",
+ "DacPreTrainedModel"
+ ]
}
},
"info.aet.data2vec-audio-960h": {
@@ -3044,7 +6936,15 @@
"0": {
"transformers": "Data2VecAudioModel"
}
- }
+ },
+ "tasks": [
+ "Data2VecAudioForAudioFrameClassification",
+ "Data2VecAudioForCTC",
+ "Data2VecAudioForSequenceClassification",
+ "Data2VecAudioForXVector",
+ "Data2VecAudioModel",
+ "Data2VecAudioPreTrainedModel"
+ ]
}
},
"info.art.data2vec-text": {
@@ -3054,7 +6954,17 @@
"0": {
"transformers": "Data2VecTextModel"
}
- }
+ },
+ "tasks": [
+ "Data2VecTextForCausalLM",
+ "Data2VecTextForMaskedLM",
+ "Data2VecTextForMultipleChoice",
+ "Data2VecTextForQuestionAnswering",
+ "Data2VecTextForSequenceClassification",
+ "Data2VecTextForTokenClassification",
+ "Data2VecTextModel",
+ "Data2VecTextPreTrainedModel"
+ ]
}
},
"info.vit.data2vec-vision": {
@@ -3064,7 +6974,28 @@
"0": {
"transformers": "Data2VecVisionModel"
}
- }
+ },
+ "tasks": [
+ "Data2VecVisionForImageClassification",
+ "Data2VecVisionForSemanticSegmentation",
+ "Data2VecVisionModel",
+ "Data2VecVisionPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.dbrx": {
+ "*": {
+ "repo": "databricks/dbrx-instruct",
+ "pkg": {
+ "0": {
+ "transformers": "DbrxModel"
+ }
+ },
+ "tasks": [
+ "DbrxForCausalLM",
+ "DbrxModel",
+ "DbrxPreTrainedModel"
+ ]
}
},
"info.art.deberta": {
@@ -3074,7 +7005,15 @@
"0": {
"transformers": "DebertaModel"
}
- }
+ },
+ "tasks": [
+ "DebertaForMaskedLM",
+ "DebertaForQuestionAnswering",
+ "DebertaForSequenceClassification",
+ "DebertaForTokenClassification",
+ "DebertaModel",
+ "DebertaPreTrainedModel"
+ ]
}
},
"info.art.deberta-v2-x": {
@@ -3084,7 +7023,16 @@
"0": {
"transformers": "DebertaV2Model"
}
- }
+ },
+ "tasks": [
+ "DebertaV2ForMaskedLM",
+ "DebertaV2ForMultipleChoice",
+ "DebertaV2ForQuestionAnswering",
+ "DebertaV2ForSequenceClassification",
+ "DebertaV2ForTokenClassification",
+ "DebertaV2Model",
+ "DebertaV2PreTrainedModel"
+ ]
}
},
"info.art.decision-transformer-gym-hopper": {
@@ -3094,7 +7042,61 @@
"0": {
"transformers": "DecisionTransformerModel"
}
- }
+ },
+ "tasks": [
+ "DecisionTransformerGPT2Model",
+ "DecisionTransformerGPT2PreTrainedModel",
+ "DecisionTransformerModel",
+ "DecisionTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.deepseek-v2-lite": {
+ "*": {
+ "repo": "deepseek-ai/DeepSeek-V2-Lite",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekV2Model"
+ }
+ },
+ "tasks": [
+ "DeepseekV2PreTrainedModel",
+ "DeepseekV2Model",
+ "DeepseekV2ForCausalLM",
+ "DeepseekV2ForSequenceClassification"
+ ]
+ }
+ },
+ "info.moe.deepseek-v3": {
+ "*": {
+ "repo": "bzantium/tiny-deepseek-v3",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekV3Model"
+ }
+ },
+ "tasks": [
+ "DeepseekV3PreTrainedModel",
+ "DeepseekV3Model",
+ "DeepseekV3ForCausalLM",
+ "DeepseekV3ForSequenceClassification",
+ "DeepseekV3ForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.deepseek-vl-chat": {
+ "*": {
+ "repo": "deepseek-community/deepseek-vl-1.3b-chat",
+ "pkg": {
+ "0": {
+ "transformers": "DeepseekVLModel"
+ }
+ },
+ "tasks": [
+ "DeepseekVLPreTrainedModel",
+ "DeepseekVLModel",
+ "DeepseekVLForConditionalGeneration"
+ ]
}
},
"info.detr.deformable-detr": {
@@ -3104,7 +7106,12 @@
"0": {
"transformers": "DeformableDetrModel"
}
- }
+ },
+ "tasks": [
+ "DeformableDetrForObjectDetection",
+ "DeformableDetrModel",
+ "DeformableDetrPreTrainedModel"
+ ]
}
},
"info.vit.deit-distilled-patch16-224": {
@@ -3114,7 +7121,14 @@
"0": {
"transformers": "DeiTModel"
}
- }
+ },
+ "tasks": [
+ "DeiTForImageClassification",
+ "DeiTForImageClassificationWithTeacher",
+ "DeiTForMaskedImageModeling",
+ "DeiTModel",
+ "DeiTPreTrainedModel"
+ ]
}
},
"info.vit.depth": {
@@ -3124,7 +7138,12 @@
"0": {
"transformers": "DepthProModel"
}
- }
+ },
+ "tasks": [
+ "DepthProPreTrainedModel",
+ "DepthProModel",
+ "DepthProForDepthEstimation"
+ ]
}
},
"info.detr.detr-resnet-50": {
@@ -3134,7 +7153,46 @@
"0": {
"transformers": "DetrModel"
}
- }
+ },
+ "tasks": [
+ "DetrForObjectDetection",
+ "DetrForSegmentation",
+ "DetrModel",
+ "DetrPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.dia": {
+ "*": {
+ "repo": "nari-labs/Dia-1.6B",
+ "pkg": {
+ "0": {
+ "transformers": "DiaModel"
+ }
+ },
+ "tasks": [
+ "DiaModel",
+ "DiaPreTrainedModel",
+ "DiaForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.diffllama-handcut": {
+ "*": {
+ "repo": "kajuma/DiffLlama-0.3B-handcut",
+ "pkg": {
+ "0": {
+ "transformers": "DiffLlamaModel"
+ }
+ },
+ "tasks": [
+ "DiffLlamaPreTrainedModel",
+ "DiffLlamaModel",
+ "DiffLlamaForCausalLM",
+ "DiffLlamaForSequenceClassification",
+ "DiffLlamaForQuestionAnswering",
+ "DiffLlamaForTokenClassification"
+ ]
}
},
"info.gan.dinat-in-224": {
@@ -3144,7 +7202,13 @@
"0": {
"transformers": "DinatModel"
}
- }
+ },
+ "tasks": [
+ "DinatForImageClassification",
+ "DinatModel",
+ "DinatPreTrainedModel",
+ "DinatBackbone"
+ ]
}
},
"info.vit.dinov2-patch16-224": {
@@ -3154,7 +7218,13 @@
"0": {
"transformers": "Dinov2Model"
}
- }
+ },
+ "tasks": [
+ "Dinov2ForImageClassification",
+ "Dinov2Model",
+ "Dinov2PreTrainedModel",
+ "Dinov2Backbone"
+ ]
}
},
"info.vit.dinov2-with-registers": {
@@ -3164,7 +7234,43 @@
"0": {
"transformers": "Dinov2WithRegistersModel"
}
- }
+ },
+ "tasks": [
+ "Dinov2WithRegistersPreTrainedModel",
+ "Dinov2WithRegistersModel",
+ "Dinov2WithRegistersForImageClassification",
+ "Dinov2WithRegistersBackbone"
+ ]
+ }
+ },
+ "info.vit.dinov3-convnext-pretrain-lvd": {
+ "*": {
+ "repo": "facebook/dinov3-convnext-tiny-pretrain-lvd1689m",
+ "pkg": {
+ "0": {
+ "transformers": "DINOv3ConvNextModel"
+ }
+ },
+ "tasks": [
+ "DINOv3ConvNextModel",
+ "DINOv3ConvNextPreTrainedModel",
+ "DINOv3ConvNextBackbone"
+ ]
+ }
+ },
+ "info.vit.dinov3-vits16-pretrain-lvd": {
+ "*": {
+ "repo": "facebook/dinov3-vits16-pretrain-lvd1689m",
+ "pkg": {
+ "0": {
+ "transformers": "DINOv3ViTModel"
+ }
+ },
+ "tasks": [
+ "DINOv3ViTModel",
+ "DINOv3ViTPreTrainedModel",
+ "DINOv3ViTBackbone"
+ ]
}
},
"info.art.distilbert-uncased": {
@@ -3174,7 +7280,32 @@
"0": {
"transformers": "DistilBertModel"
}
- }
+ },
+ "tasks": [
+ "DistilBertForMaskedLM",
+ "DistilBertForMultipleChoice",
+ "DistilBertForQuestionAnswering",
+ "DistilBertForSequenceClassification",
+ "DistilBertForTokenClassification",
+ "DistilBertModel",
+ "DistilBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.doge": {
+ "*": {
+ "repo": "SmallDoge/Doge-320M",
+ "pkg": {
+ "0": {
+ "transformers": "DogeModel"
+ }
+ },
+ "tasks": [
+ "DogeForCausalLM",
+ "DogeModel",
+ "DogePreTrainedModel",
+ "DogeForSequenceClassification"
+ ]
}
},
"info.vit.donut": {
@@ -3184,17 +7315,46 @@
"0": {
"transformers": "DonutSwinModel"
}
- }
+ },
+ "tasks": [
+ "DonutSwinModel",
+ "DonutSwinPreTrainedModel",
+ "DonutSwinForImageClassification"
+ ]
+ }
+ },
+ "info.moe.dots-llm1": {
+ "*": {
+ "repo": "rednote-hilab/dots.llm1.base",
+ "pkg": {
+ "0": {
+ "transformers": "Dots1Model"
+ }
+ },
+ "tasks": [
+ "Dots1PreTrainedModel",
+ "Dots1Model",
+ "Dots1ForCausalLM"
+ ]
}
},
- "info.art.dpr-ctx-encoder-single-nq": {
+ "info.vit.dpr-ctx-encoder-single-nq": {
"*": {
"repo": "facebook/dpr-ctx_encoder-single-nq-base",
"pkg": {
"0": {
"transformers": "DPRQuestionEncoder"
}
- }
+ },
+ "tasks": [
+ "DPRContextEncoder",
+ "DPRPretrainedContextEncoder",
+ "DPRPreTrainedModel",
+ "DPRPretrainedQuestionEncoder",
+ "DPRPretrainedReader",
+ "DPRQuestionEncoder",
+ "DPRReader"
+ ]
}
},
"info.detr.dpt": {
@@ -3204,7 +7364,13 @@
"0": {
"transformers": "DPTModel"
}
- }
+ },
+ "tasks": [
+ "DPTForDepthEstimation",
+ "DPTForSemanticSegmentation",
+ "DPTModel",
+ "DPTPreTrainedModel"
+ ]
}
},
"info.vit.edgetam1-hiera": {
@@ -3214,7 +7380,12 @@
"0": {
"transformers": "EdgeTamModel"
}
- }
+ },
+ "tasks": [
+ "EdgeTamModel",
+ "EdgeTamVisionModel",
+ "EdgeTamPreTrainedModel"
+ ]
}
},
"info.vit.edgetam": {
@@ -3224,8 +7395,43 @@
"0": {
"transformers": "EdgeTamVideoModel"
}
- }
- }
+ },
+ "tasks": [
+ "EdgeTamVideoModel",
+ "EdgeTamVideoInferenceSession",
+ "EdgeTamVideoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.efficientloftr": {
+ "*": {
+ "repo": "zju-community/efficientloftr",
+ "pkg": {
+ "0": {
+ "transformers": "EfficientLoFTRModel"
+ }
+ },
+ "tasks": [
+ "EfficientLoFTRPreTrainedModel",
+ "EfficientLoFTRModel",
+ "EfficientLoFTRForKeypointMatching"
+ ]
+ }
+ },
+ "info.vit.efficientnet-b7": {
+ "*": {
+ "repo": "google/efficientnet-b7",
+ "pkg": {
+ "0": {
+ "transformers": "EfficientNetModel"
+ }
+ },
+ "tasks": [
+ "EfficientNetForImageClassification",
+ "EfficientNetModel",
+ "EfficientNetPreTrainedModel"
+ ]
+ }
},
"info.art.electra-discriminator": {
"*": {
@@ -3234,7 +7440,36 @@
"0": {
"transformers": "ElectraModel"
}
- }
+ },
+ "tasks": [
+ "ElectraForCausalLM",
+ "ElectraForMaskedLM",
+ "ElectraForMultipleChoice",
+ "ElectraForPreTraining",
+ "ElectraForQuestionAnswering",
+ "ElectraForSequenceClassification",
+ "ElectraForTokenClassification",
+ "ElectraModel",
+ "ElectraPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.emu3-chat-hf": {
+ "*": {
+ "repo": "Emu3-community/Emu3-Chat-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Emu3Model"
+ }
+ },
+ "tasks": [
+ "Emu3ForConditionalGeneration",
+ "Emu3ForCausalLM",
+ "Emu3TextModel",
+ "Emu3PreTrainedModel",
+ "Emu3VQVAE",
+ "Emu3Model"
+ ]
}
},
"info.gan.encodec": {
@@ -3244,7 +7479,11 @@
"0": {
"transformers": "EncodecModel"
}
- }
+ },
+ "tasks": [
+ "EncodecModel",
+ "EncodecPreTrainedModel"
+ ]
}
},
"info.art.ernie-3-zh": {
@@ -3254,7 +7493,49 @@
"0": {
"transformers": "ErnieModel"
}
- }
+ },
+ "tasks": [
+ "ErnieForCausalLM",
+ "ErnieForMaskedLM",
+ "ErnieForMultipleChoice",
+ "ErnieForNextSentencePrediction",
+ "ErnieForPreTraining",
+ "ErnieForQuestionAnswering",
+ "ErnieForSequenceClassification",
+ "ErnieForTokenClassification",
+ "ErnieModel",
+ "ErniePreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.ernie-45-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-0.3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5Model"
+ }
+ },
+ "tasks": [
+ "Ernie4_5ForCausalLM",
+ "Ernie4_5Model",
+ "Ernie4_5PreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.ernie-4-a-pt": {
+ "*": {
+ "repo": "baidu/ERNIE-4.5-21B-A3B-PT",
+ "pkg": {
+ "0": {
+ "transformers": "Ernie4_5_MoeModel"
+ }
+ },
+ "tasks": [
+ "Ernie4_5_MoeForCausalLM",
+ "Ernie4_5_MoeModel",
+ "Ernie4_5_MoePreTrainedModel"
+ ]
}
},
"info.vit.ernie-4-vl-a-pt": {
@@ -3264,7 +7545,15 @@
"0": {
"transformers": "Ernie4_5_VL_MoeModel"
}
- }
+ },
+ "tasks": [
+ "Ernie4_5_VL_MoePreTrainedModel",
+ "Ernie4_5_VL_MoeForConditionalGeneration",
+ "Ernie4_5_VL_MoeModel",
+ "Ernie4_5_VL_MoeTextModel",
+ "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
+ "Ernie4_5_VL_MoeVariableResolutionResamplerModel"
+ ]
}
},
"info.aet.esm": {
@@ -3274,7 +7563,80 @@
"0": {
"transformers": "EsmModel"
}
- }
+ },
+ "tasks": [
+ "EsmForMaskedLM",
+ "EsmForSequenceClassification",
+ "EsmForTokenClassification",
+ "EsmModel",
+ "EsmPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.evolla-hf": {
+ "*": {
+ "repo": "westlake-repl/Evolla-10B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "EvollaModel"
+ }
+ },
+ "tasks": [
+ "EvollaForProteinText2Text",
+ "EvollaModel",
+ "EvollaPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.exaone-4": {
+ "*": {
+ "repo": "LGAI-EXAONE/EXAONE-4.0-32B",
+ "pkg": {
+ "0": {
+ "transformers": "Exaone4Model"
+ }
+ },
+ "tasks": [
+ "Exaone4PreTrainedModel",
+ "Exaone4Model",
+ "Exaone4ForCausalLM",
+ "Exaone4ForSequenceClassification",
+ "Exaone4ForTokenClassification",
+ "Exaone4ForQuestionAnswering"
+ ]
+ }
+ },
+ "info.ssm.falcon": {
+ "*": {
+ "repo": "tiiuae/falcon-7b",
+ "pkg": {
+ "0": {
+ "transformers": "FalconModel"
+ }
+ },
+ "tasks": [
+ "FalconForCausalLM",
+ "FalconModel",
+ "FalconPreTrainedModel",
+ "FalconForSequenceClassification",
+ "FalconForTokenClassification",
+ "FalconForQuestionAnswering"
+ ]
+ }
+ },
+ "info.ssm.falconh1-t-hf": {
+ "*": {
+ "repo": "tiiuae/Falcon-H1-34B-Instruct",
+ "pkg": {
+ "0": {
+ "transformers": "FalconH1Model"
+ }
+ },
+ "tasks": [
+ "FalconH1Model",
+ "FalconH1ForCausalLM",
+ "FalconH1PreTrainedModel"
+ ]
}
},
"info.ssm.falcon-mamba": {
@@ -3284,7 +7646,13 @@
"0": {
"transformers": "FalconMambaModel"
}
- }
+ },
+ "tasks": [
+ "FalconMambaForCausalLM",
+ "FalconMambaModel",
+ "FalconMambaPreTrainedModel",
+ "FalconMambaCache"
+ ]
}
},
"info.vit.fastvlm": {
@@ -3294,7 +7662,12 @@
"0": {
"transformers": "FastVlmModel"
}
- }
+ },
+ "tasks": [
+ "FastVlmForConditionalGeneration",
+ "FastVlmModel",
+ "FastVlmPreTrainedModel"
+ ]
}
},
"info.aet.fastspeech2-conformer": {
@@ -3304,7 +7677,29 @@
"0": {
"transformers": "FastSpeech2ConformerModel"
}
- }
+ },
+ "tasks": [
+ "FastSpeech2ConformerWithHifiGan",
+ "FastSpeech2ConformerHifiGan",
+ "FastSpeech2ConformerModel",
+ "FastSpeech2ConformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.fastspeech2-conformer": {
+ "*": {
+ "repo": "espnet/fastspeech2_conformer",
+ "pkg": {
+ "0": {
+ "transformers": "FastSpeech2ConformerWithHifiGan"
+ }
+ },
+ "tasks": [
+ "FastSpeech2ConformerWithHifiGan",
+ "FastSpeech2ConformerHifiGan",
+ "FastSpeech2ConformerModel",
+ "FastSpeech2ConformerPreTrainedModel"
+ ]
}
},
"info.art.flaubert-uncased": {
@@ -3314,7 +7709,51 @@
"0": {
"transformers": "FlaubertModel"
}
- }
+ },
+ "tasks": [
+ "FlaubertForMultipleChoice",
+ "FlaubertForQuestionAnswering",
+ "FlaubertForQuestionAnsweringSimple",
+ "FlaubertForSequenceClassification",
+ "FlaubertForTokenClassification",
+ "FlaubertModel",
+ "FlaubertWithLMHeadModel",
+ "FlaubertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.flava": {
+ "*": {
+ "repo": "facebook/flava-full",
+ "pkg": {
+ "0": {
+ "transformers": "FlavaModel"
+ }
+ },
+ "tasks": [
+ "FlavaForPreTraining",
+ "FlavaImageCodebook",
+ "FlavaImageModel",
+ "FlavaModel",
+ "FlavaMultimodalModel",
+ "FlavaPreTrainedModel",
+ "FlavaTextModel"
+ ]
+ }
+ },
+ "info.moe.flexolmo-7x-1t": {
+ "*": {
+ "repo": "allenai/FlexOlmo-7x7B-1T",
+ "pkg": {
+ "0": {
+ "transformers": "FlexOlmoModel"
+ }
+ },
+ "tasks": [
+ "FlexOlmoForCausalLM",
+ "FlexOlmoModel",
+ "FlexOlmoPreTrainedModel"
+ ]
}
},
"info.vit.florence-2": {
@@ -3324,7 +7763,14 @@
"0": {
"transformers": "Florence2Model"
}
- }
+ },
+ "tasks": [
+ "Florence2Model",
+ "Florence2ForConditionalGeneration",
+ "Florence2PreTrainedModel",
+ "Florence2VisionBackbone",
+ "Florence2VisionPreTrainedModel"
+ ]
}
},
"info.art.fnet": {
@@ -3334,7 +7780,19 @@
"0": {
"transformers": "FNetModel"
}
- }
+ },
+ "tasks": [
+ "FNetForMaskedLM",
+ "FNetForMultipleChoice",
+ "FNetForNextSentencePrediction",
+ "FNetForPreTraining",
+ "FNetForQuestionAnswering",
+ "FNetForSequenceClassification",
+ "FNetForTokenClassification",
+ "FNetLayer",
+ "FNetModel",
+ "FNetPreTrainedModel"
+ ]
}
},
"info.vit.focalnet": {
@@ -3344,7 +7802,14 @@
"0": {
"transformers": "FocalNetModel"
}
- }
+ },
+ "tasks": [
+ "FocalNetForImageClassification",
+ "FocalNetForMaskedImageModeling",
+ "FocalNetBackbone",
+ "FocalNetModel",
+ "FocalNetPreTrainedModel"
+ ]
}
},
"info.stst.wmt19-en-ru": {
@@ -3354,7 +7819,12 @@
"0": {
"transformers": "FSMTModel"
}
- }
+ },
+ "tasks": [
+ "FSMTForConditionalGeneration",
+ "FSMTModel",
+ "PretrainedFSMTModel"
+ ]
}
},
"info.aet.funnel": {
@@ -3364,997 +7834,2887 @@
"0": {
"transformers": "FunnelModel"
}
- }
+ },
+ "tasks": [
+ "FunnelBaseModel",
+ "FunnelForMaskedLM",
+ "FunnelForMultipleChoice",
+ "FunnelForPreTraining",
+ "FunnelForQuestionAnswering",
+ "FunnelForSequenceClassification",
+ "FunnelForTokenClassification",
+ "FunnelModel",
+ "FunnelPreTrainedModel"
+ ]
}
},
- "info.vit.git": {
+ "info.vit.fuyu": {
"*": {
- "repo": "microsoft/git-base",
+ "repo": "adept/fuyu-8b",
"pkg": {
"0": {
- "transformers": "GitModel"
+ "transformers": "FuyuModel"
}
- }
+ },
+ "tasks": [
+ "FuyuForCausalLM",
+ "FuyuPreTrainedModel",
+ "FuyuModel"
+ ]
}
},
- "info.vit.glm-4v-thinking": {
+ "info.stst.gemma": {
"*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
+ "repo": "google/gemma-7b",
"pkg": {
"0": {
- "transformers": "Glm46VModel"
+ "transformers": "GemmaModel"
}
- }
+ },
+ "file_256": [
+ "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
+ ],
+ "layer_b3": [
+ "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
+ "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
+ ],
+ "layer_256": [
+ "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
+ "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
+ ],
+ "tasks": [
+ "GemmaModel",
+ "GemmaForCausalLM",
+ "GemmaForSequenceClassification",
+ "GemmaForTokenClassification",
+ "GemmaPreTrainedModel"
+ ]
}
},
- "info.vit.glm-4v": {
+ "info.stst.gemma2": {
"*": {
- "repo": "zai-org/GLM-4.5V",
+ "repo": "google/gemma-2-9b",
"pkg": {
"0": {
- "transformers": "Glm4vMoeModel"
+ "transformers": "Gemma2Model"
}
- }
+ },
+ "file_256": [
+ "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
+ "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
+ ],
+ "layer_b3": [
+ "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
+ "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
+ ],
+ "layer_256": [
+ "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
+ "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
+ ],
+ "tasks": [
+ "Gemma2ForCausalLM",
+ "Gemma2Model",
+ "Gemma2PreTrainedModel",
+ "Gemma2ForSequenceClassification",
+ "Gemma2ForTokenClassification"
+ ]
}
},
- "info.stst.glm-asr-nano-2512": {
+ "info.vit.gemma-3": {
"*": {
- "repo": "zai-org/GLM-ASR-Nano-2512",
+ "repo": "google/gemma-3-4b-it",
"pkg": {
"0": {
- "transformers": "GlmAsrForConditionalGeneration"
+ "transformers": "Gemma3Model"
}
- }
+ },
+ "tasks": [
+ "Gemma3PreTrainedModel",
+ "Gemma3TextModel",
+ "Gemma3ForCausalLM",
+ "Gemma3ForConditionalGeneration",
+ "Gemma3Model",
+ "Gemma3ForSequenceClassification",
+ "Gemma3TextForSequenceClassification"
+ ]
}
},
- "info.vit.glpn-kitti": {
+ "info.stst.gemma3-text": {
"*": {
- "repo": "vinvino02/glpn-kitti",
+ "repo": "google/gemma-3-12b-it",
"pkg": {
"0": {
- "transformers": "GLPNModel"
+ "transformers": "Gemma3TextModel"
}
- }
+ },
+ "tasks": [
+ "Gemma3PreTrainedModel",
+ "Gemma3TextModel",
+ "Gemma3ForCausalLM",
+ "Gemma3ForConditionalGeneration",
+ "Gemma3Model",
+ "Gemma3ForSequenceClassification",
+ "Gemma3TextForSequenceClassification"
+ ]
}
},
- "info.art.gpt2": {
+ "info.vit.gemma-3n-e": {
"*": {
- "repo": "openai-community/gpt2",
+ "repo": "google/gemma-3n-E4B",
"pkg": {
"0": {
- "transformers": "GPT2Model"
+ "transformers": "Gemma3nModel"
}
- }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
}
},
- "info.art.gpt-bigcode-santacoder": {
+ "info.art.gemma-3n-e": {
"*": {
- "repo": "bigcode/gpt_bigcode-santacoder",
+ "repo": "google/gemma-3n-E4B",
"pkg": {
"0": {
- "transformers": "GPTBigCodeModel"
+ "transformers": "Gemma3nAudioEncoder"
}
- }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
}
},
- "info.art.gpt-neo": {
+ "info.stst.gemma-3n-e": {
"*": {
- "repo": "EleutherAI/gpt-neo-1.3B",
+ "repo": "google/gemma-3n-E4B",
"pkg": {
"0": {
- "transformers": "GPTNeoModel"
+ "transformers": "Gemma3nTextModel"
}
- }
+ },
+ "tasks": [
+ "Gemma3nAudioEncoder",
+ "Gemma3nForCausalLM",
+ "Gemma3nForConditionalGeneration",
+ "Gemma3nModel",
+ "Gemma3nPreTrainedModel",
+ "Gemma3nTextModel"
+ ]
}
},
- "info.art.gpt-j": {
+ "info.vit.git": {
"*": {
- "repo": "EleutherAI/gpt-j-6B",
+ "repo": "microsoft/git-base",
"pkg": {
"0": {
- "transformers": "GPTJModel"
+ "transformers": "GitModel"
}
- }
+ },
+ "tasks": [
+ "GitForCausalLM",
+ "GitModel",
+ "GitPreTrainedModel",
+ "GitVisionModel"
+ ]
}
},
- "info.vit.llava-v1-mistral-hf": {
+ "info.stst.glm-4-chat": {
"*": {
- "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
+ "repo": "zai-org/glm-4-9b-chat",
"pkg": {
"0": {
- "transformers": "LlavaNextModel"
+ "transformers": "GlmModel"
}
- }
+ },
+ "tasks": [
+ "GlmPreTrainedModel",
+ "GlmModel",
+ "GlmForCausalLM",
+ "GlmForSequenceClassification",
+ "GlmForTokenClassification"
+ ]
}
},
- "info.detr.grounding-dino": {
+ "info.stst.glm-4-0414": {
"*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
+ "repo": "zai-org/GLM-4-9B-0414",
"pkg": {
"0": {
- "transformers": "GroundingDinoModel"
+ "transformers": "Glm4Model"
}
- }
+ },
+ "tasks": [
+ "Glm4PreTrainedModel",
+ "Glm4Model",
+ "Glm4ForCausalLM",
+ "Glm4ForSequenceClassification",
+ "Glm4ForTokenClassification"
+ ]
}
},
- "info.vit.groupvit-gcc-yfcc": {
+ "info.vit.glm-4v-thinking": {
"*": {
- "repo": "nvidia/groupvit-gcc-yfcc",
+ "repo": "zai-org/GLM-4.1V-9B-Thinking",
"pkg": {
"0": {
- "transformers": "GroupViTModel"
+ "transformers": "Glm46VModel"
}
- }
+ },
+ "tasks": [
+ "Glm46VModel",
+ "Glm46VPreTrainedModel",
+ "Glm46VForConditionalGeneration"
+ ]
}
},
- "info.vit.dfine-x-coco": {
+ "info.moe.glm-4-a": {
"*": {
- "repo": "ustc-community/dfine_x_coco",
+ "repo": "zai-org/GLM-4.5-Air",
"pkg": {
"0": {
- "transformers": "HGNetV2Backbone"
+ "transformers": "Glm4MoeModel"
}
- }
+ },
+ "tasks": [
+ "Glm4MoePreTrainedModel",
+ "Glm4MoeModel",
+ "Glm4MoeForCausalLM"
+ ]
}
},
- "info.vit.hiera-224": {
+ "info.vit.glm-4v": {
"*": {
- "repo": "facebook/hiera-base-224-hf",
+ "repo": "zai-org/GLM-4.5V",
"pkg": {
"0": {
- "transformers": "HieraModel"
+ "transformers": "Glm4vMoeModel"
}
- }
+ },
+ "tasks": [
+ "Glm4vMoeForConditionalGeneration",
+ "Glm4vMoeModel",
+ "Glm4vMoePreTrainedModel",
+ "Glm4vMoeTextModel",
+ "Glm4vMoeVisionModel"
+ ]
}
},
- "info.aet.hubert-ls960": {
+ "info.moe.glm-4v": {
"*": {
- "repo": "facebook/hubert-base-ls960",
+ "repo": "zai-org/GLM-4.5V",
"pkg": {
"0": {
- "transformers": "HubertModel"
+ "transformers": "Glm4vMoeTextModel"
}
- }
+ },
+ "tasks": [
+ "Glm4vMoeForConditionalGeneration",
+ "Glm4vMoeModel",
+ "Glm4vMoePreTrainedModel",
+ "Glm4vMoeTextModel",
+ "Glm4vMoeVisionModel"
+ ]
}
},
- "info.art.ibert-roberta": {
+ "info.stst.glm-4v-thinking": {
"*": {
- "repo": "kssteven/ibert-roberta-base",
+ "repo": "zai-org/GLM-4.1V-9B-Thinking",
"pkg": {
"0": {
- "transformers": "IBertModel"
+ "transformers": "Glm4vTextModel"
}
- }
+ },
+ "tasks": [
+ "Glm4vForConditionalGeneration",
+ "Glm4vModel",
+ "Glm4vPreTrainedModel",
+ "Glm4vTextModel",
+ "Glm4vVisionModel"
+ ]
}
},
- "info.vit.idefics": {
+ "info.stst.glm-asr-nano-2512": {
"*": {
- "repo": "HuggingFaceM4/idefics-9b",
+ "repo": "zai-org/GLM-ASR-Nano-2512",
"pkg": {
"0": {
- "transformers": "IdeficsModel"
+ "transformers": "GlmAsrForConditionalGeneration"
}
- }
+ },
+ "tasks": [
+ "GlmAsrEncoder",
+ "GlmAsrForConditionalGeneration",
+ "GlmAsrPreTrainedModel"
+ ]
}
},
- "info.vit.idefics2": {
+ "info.vit.glpn-kitti": {
"*": {
- "repo": "HuggingFaceM4/idefics2-8b",
+ "repo": "vinvino02/glpn-kitti",
"pkg": {
"0": {
- "transformers": "Idefics2Model"
+ "transformers": "GLPNModel"
}
- }
+ },
+ "tasks": [
+ "GLPNForDepthEstimation",
+ "GLPNLayer",
+ "GLPNModel",
+ "GLPNPreTrainedModel"
+ ]
}
},
- "info.vit.idefics3-llama3": {
+ "info.vit.got-ocr-2-hf": {
"*": {
- "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
+ "repo": "stepfun-ai/GOT-OCR-2.0-hf",
"pkg": {
"0": {
- "transformers": "Idefics3Model"
+ "transformers": "GotOcr2Model"
}
- }
+ },
+ "tasks": [
+ "GotOcr2PreTrainedModel",
+ "GotOcr2Model",
+ "GotOcr2ForConditionalGeneration"
+ ]
}
},
- "info.vit.siglip-patch16-224": {
+ "info.art.gpt2": {
"*": {
- "repo": "google/siglip-base-patch16-224",
+ "repo": "openai-community/gpt2",
"pkg": {
"0": {
- "transformers": "Idefics3VisionTransformer"
+ "transformers": "GPT2Model"
}
- }
+ },
+ "tasks": [
+ "GPT2DoubleHeadsModel",
+ "GPT2ForQuestionAnswering",
+ "GPT2ForSequenceClassification",
+ "GPT2ForTokenClassification",
+ "GPT2LMHeadModel",
+ "GPT2Model",
+ "GPT2PreTrainedModel"
+ ]
}
},
- "info.vit.ijepa-vith14": {
+ "info.art.gpt-bigcode-santacoder": {
"*": {
- "repo": "facebook/ijepa_vith14_1k",
+ "repo": "bigcode/gpt_bigcode-santacoder",
"pkg": {
"0": {
- "transformers": "IJepaModel"
+ "transformers": "GPTBigCodeModel"
}
- }
+ },
+ "tasks": [
+ "GPTBigCodeForSequenceClassification",
+ "GPTBigCodeForTokenClassification",
+ "GPTBigCodeForCausalLM",
+ "GPTBigCodeModel",
+ "GPTBigCodePreTrainedModel"
+ ]
}
},
- "info.art.imagegpt": {
+ "info.art.gpt-neo": {
"*": {
- "repo": "openai/imagegpt-small",
+ "repo": "EleutherAI/gpt-neo-1.3B",
"pkg": {
"0": {
- "transformers": "ImageGPTModel"
+ "transformers": "GPTNeoModel"
}
- }
+ },
+ "tasks": [
+ "GPTNeoForCausalLM",
+ "GPTNeoForQuestionAnswering",
+ "GPTNeoForSequenceClassification",
+ "GPTNeoForTokenClassification",
+ "GPTNeoModel",
+ "GPTNeoPreTrainedModel"
+ ]
}
},
- "info.vit.blip-flan-t5": {
+ "info.stst.gpt-neox": {
"*": {
- "repo": "Salesforce/instructblip-flan-t5-xl",
+ "repo": "EleutherAI/gpt-neox-20b",
"pkg": {
"0": {
- "transformers": "InstructBlipModel"
+ "transformers": "GPTNeoXModel"
}
- }
+ },
+ "tasks": [
+ "GPTNeoXForCausalLM",
+ "GPTNeoXForQuestionAnswering",
+ "GPTNeoXForSequenceClassification",
+ "GPTNeoXForTokenClassification",
+ "GPTNeoXLayer",
+ "GPTNeoXModel",
+ "GPTNeoXPreTrainedModel"
+ ]
}
},
- "info.vit.internvl3-hf": {
+ "info.stst.gpt-neox-japanese": {
"*": {
- "repo": "OpenGVLab/InternVL3-1B-hf",
+ "repo": "abeja/gpt-neox-japanese-2.7b",
"pkg": {
"0": {
- "transformers": "InternVLModel"
+ "transformers": "GPTNeoXJapaneseModel"
}
- }
+ },
+ "tasks": [
+ "GPTNeoXJapaneseForCausalLM",
+ "GPTNeoXJapaneseLayer",
+ "GPTNeoXJapaneseModel",
+ "GPTNeoXJapanesePreTrainedModel"
+ ]
}
},
- "info.ssm.jamba-v0": {
+ "info.moe.gpt-oss": {
"*": {
- "repo": "ai21labs/Jamba-v0.1",
+ "repo": "openai/gpt-oss-120b",
"pkg": {
"0": {
- "transformers": "JambaModel"
+ "transformers": "GptOssModel"
}
- }
+ },
+ "file_256": [
+ "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
+ "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
+ ],
+ "layer_b3": [
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
+ "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
+ ],
+ "layer_256": [
+ "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
+ "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
+ ],
+ "tasks": [
+ "GptOssForCausalLM",
+ "GptOssForSequenceClassification",
+ "GptOssForTokenClassification",
+ "GptOssModel",
+ "GptOssPreTrainedModel"
+ ]
}
},
- "info.vit.janus": {
+ "info.art.gpt-j": {
"*": {
- "repo": "deepseek-community/Janus-Pro-1B",
+ "repo": "EleutherAI/gpt-j-6B",
"pkg": {
"0": {
- "transformers": "JanusModel"
+ "transformers": "GPTJModel"
}
- }
+ },
+ "tasks": [
+ "GPTJForCausalLM",
+ "GPTJForQuestionAnswering",
+ "GPTJForSequenceClassification",
+ "GPTJModel",
+ "GPTJPreTrainedModel"
+ ]
}
},
- "info.vit.kosmos-2-patch14-224": {
+ "info.stst.granite": {
"*": {
- "repo": "microsoft/kosmos-2-patch14-224",
+ "repo": "ibm-granite/granite-3.3-2b-base",
"pkg": {
"0": {
- "transformers": "Kosmos2Model"
+ "transformers": "GraniteModel"
}
- }
+ },
+ "tasks": [
+ "GraniteForCausalLM",
+ "GraniteModel",
+ "GranitePreTrainedModel"
+ ]
}
},
- "info.vit.kosmos-2": {
+ "info.moe.powermoe": {
"*": {
- "repo": "microsoft/kosmos-2.5",
+ "repo": "ibm-research/PowerMoE-3b",
"pkg": {
"0": {
- "transformers": "Kosmos2_5Model"
+ "transformers": "GraniteMoeModel"
}
- }
+ },
+ "tasks": [
+ "GraniteMoeForCausalLM",
+ "GraniteMoeModel",
+ "GraniteMoePreTrainedModel"
+ ]
}
},
- "info.aet.todo": {
+ "info.ssm.granite-4-h": {
"*": {
- "repo": "TODO/TODO",
+ "repo": "ibm-granite/granite-4.0-h-small",
"pkg": {
"0": {
- "transformers": "LasrForCTC"
+ "transformers": "GraniteMoeHybridModel"
}
- }
+ },
+ "tasks": [
+ "GraniteMoeHybridForCausalLM",
+ "GraniteMoeHybridModel",
+ "GraniteMoeHybridPreTrainedModel"
+ ]
}
},
- "info.stst.todo": {
+ "info.moe.moe-active-shared-experts": {
"*": {
- "repo": "TODO/TODO",
+ "repo": "ibm-research/moe-7b-1b-active-shared-experts",
"pkg": {
"0": {
- "transformers": "LasrEncoder"
+ "transformers": "GraniteMoeSharedModel"
}
- }
+ },
+ "tasks": [
+ "GraniteMoeSharedForCausalLM",
+ "GraniteMoeSharedModel",
+ "GraniteMoeSharedPreTrainedModel"
+ ]
}
},
- "info.art.layoutlm-uncased": {
+ "info.vit.llava-v1-mistral-hf": {
"*": {
- "repo": "microsoft/layoutlm-base-uncased",
+ "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
"pkg": {
"0": {
- "transformers": "LayoutLMModel"
+ "transformers": "LlavaNextModel"
}
- }
+ },
+ "tasks": [
+ "LlavaNextForConditionalGeneration",
+ "LlavaNextPreTrainedModel",
+ "LlavaNextModel"
+ ]
}
},
- "info.art.layoutlmv2-uncased": {
+ "info.detr.grounding-dino": {
"*": {
- "repo": "microsoft/layoutlmv2-base-uncased",
+ "repo": "IDEA-Research/grounding-dino-tiny",
"pkg": {
"0": {
- "transformers": "LayoutLMv2Model"
+ "transformers": "GroundingDinoModel"
}
- }
+ },
+ "tasks": [
+ "GroundingDinoForObjectDetection",
+ "GroundingDinoModel",
+ "GroundingDinoPreTrainedModel"
+ ]
}
},
- "info.vit.layoutlmv3": {
+ "info.vit.groupvit-gcc-yfcc": {
"*": {
- "repo": "microsoft/layoutlmv3-base",
+ "repo": "nvidia/groupvit-gcc-yfcc",
"pkg": {
"0": {
- "transformers": "LayoutLMv3Model"
+ "transformers": "GroupViTModel"
}
- }
+ },
+ "tasks": [
+ "GroupViTModel",
+ "GroupViTPreTrainedModel",
+ "GroupViTTextModel",
+ "GroupViTVisionModel"
+ ]
}
},
- "info.stst.led-16384": {
+ "info.stst.helium": {
"*": {
- "repo": "allenai/led-base-16384",
+ "repo": "kyutai/helium-1-2b",
"pkg": {
"0": {
- "transformers": "LEDModel"
+ "transformers": "HeliumModel"
}
- }
+ },
+ "tasks": [
+ "HeliumPreTrainedModel",
+ "HeliumModel",
+ "HeliumForCausalLM",
+ "HeliumForSequenceClassification",
+ "HeliumForTokenClassification"
+ ]
}
},
- "info.gan.levit-128s": {
+ "info.vit.dfine-x-coco": {
"*": {
- "repo": "facebook/levit-128S",
+ "repo": "ustc-community/dfine_x_coco",
"pkg": {
"0": {
- "transformers": "LevitModel"
+ "transformers": "HGNetV2Backbone"
}
- }
+ },
+ "tasks": [
+ "HGNetV2Backbone",
+ "HGNetV2PreTrainedModel",
+ "HGNetV2ForImageClassification"
+ ]
}
},
- "info.stst.lfm2-a": {
+ "info.vit.hiera-224": {
"*": {
- "repo": "LiquidAI/LFM2-8B-A1B",
+ "repo": "facebook/hiera-base-224-hf",
"pkg": {
"0": {
- "transformers": "Lfm2MoeModel"
+ "transformers": "HieraModel"
}
- }
+ },
+ "tasks": [
+ "HieraForImageClassification",
+ "HieraForPreTraining",
+ "HieraBackbone",
+ "HieraModel",
+ "HieraPreTrainedModel"
+ ]
}
},
- "info.vit.lfm2-vl": {
+ "info.aet.hubert-ls960": {
"*": {
- "repo": "LiquidAI/LFM2-VL-1.6B",
+ "repo": "facebook/hubert-base-ls960",
"pkg": {
"0": {
- "transformers": "Lfm2VlModel"
+ "transformers": "HubertModel"
}
- }
+ },
+ "tasks": [
+ "HubertForCTC",
+ "HubertForSequenceClassification",
+ "HubertModel",
+ "HubertPreTrainedModel"
+ ]
}
},
- "info.aet.lightglue-superpoint": {
+ "info.stst.hunyuan": {
"*": {
- "repo": "ETH-CVG/lightglue_superpoint",
+ "repo": "tencent/Hunyuan-7B-Instruct",
"pkg": {
"0": {
- "transformers": "LightGlueForKeypointMatching"
+ "transformers": "HunYuanDenseV1Model"
}
- }
+ },
+ "tasks": [
+ "HunYuanDenseV1ForCausalLM",
+ "HunYuanDenseV1Model",
+ "HunYuanDenseV1PreTrainedModel",
+ "HunYuanDenseV1ForSequenceClassification"
+ ]
}
},
- "info.art.lilt-roberta-en": {
+ "info.moe.hunyuan-a": {
"*": {
- "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
+ "repo": "tencent/Hunyuan-A13B-Instruct",
"pkg": {
"0": {
- "transformers": "LiltModel"
+ "transformers": "HunYuanMoEV1Model"
}
- }
+ },
+ "tasks": [
+ "HunYuanMoEV1ForCausalLM",
+ "HunYuanMoEV1Model",
+ "HunYuanMoEV1PreTrainedModel",
+ "HunYuanMoEV1ForSequenceClassification"
+ ]
}
},
- "info.vit.llama-4-scout-16e": {
+ "info.art.ibert-roberta": {
"*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "repo": "kssteven/ibert-roberta-base",
"pkg": {
"0": {
- "transformers": "Llama4ForConditionalGeneration"
+ "transformers": "IBertModel"
}
- }
+ },
+ "tasks": [
+ "IBertForMaskedLM",
+ "IBertForMultipleChoice",
+ "IBertForQuestionAnswering",
+ "IBertForSequenceClassification",
+ "IBertForTokenClassification",
+ "IBertModel",
+ "IBertPreTrainedModel"
+ ]
}
},
- "info.moe.llama-4-scout-16e": {
+ "info.vit.idefics": {
"*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "repo": "HuggingFaceM4/idefics-9b",
"pkg": {
"0": {
- "transformers": "Llama4TextModel"
+ "transformers": "IdeficsModel"
}
- }
+ },
+ "tasks": [
+ "IdeficsForVisionText2Text",
+ "IdeficsModel",
+ "IdeficsPreTrainedModel"
+ ]
}
},
- "info.vit.llava": {
+ "info.vit.idefics2": {
"*": {
- "repo": "llava-hf/llava-9b",
+ "repo": "HuggingFaceM4/idefics2-8b",
"pkg": {
"0": {
- "transformers": "LlavaModel"
+ "transformers": "Idefics2Model"
}
- }
+ },
+ "tasks": [
+ "Idefics2ForConditionalGeneration",
+ "Idefics2PreTrainedModel",
+ "Idefics2Model"
+ ]
}
},
- "info.vit.llava-next-video-hf": {
+ "info.vit.idefics3-llama3": {
"*": {
- "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+ "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
"pkg": {
"0": {
- "transformers": "LlavaNextVideoModel"
+ "transformers": "Idefics3Model"
}
- }
+ },
+ "tasks": [
+ "Idefics3ForConditionalGeneration",
+ "Idefics3PreTrainedModel",
+ "Idefics3Model",
+ "Idefics3VisionTransformer"
+ ]
}
},
- "info.vit.llava-onevision-qwen2-ov-hf": {
+ "info.vit.siglip-patch16-224": {
"*": {
- "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
+ "repo": "google/siglip-base-patch16-224",
"pkg": {
"0": {
- "transformers": "LlavaOnevisionModel"
+ "transformers": "Idefics3VisionTransformer"
}
- }
+ },
+ "tasks": [
+ "Idefics3ForConditionalGeneration",
+ "Idefics3PreTrainedModel",
+ "Idefics3Model",
+ "Idefics3VisionTransformer"
+ ]
}
},
- "info.stst.long-t5-local": {
+ "info.vit.ijepa-vith14": {
"*": {
- "repo": "google/long-t5-local-base",
+ "repo": "facebook/ijepa_vith14_1k",
"pkg": {
"0": {
- "transformers": "LongT5Model"
+ "transformers": "IJepaModel"
}
- }
+ },
+ "tasks": [
+ "IJepaPreTrainedModel",
+ "IJepaModel",
+ "IJepaForImageClassification"
+ ]
}
},
- "info.art.luke": {
+ "info.art.imagegpt": {
"*": {
- "repo": "studio-ousia/luke-base",
+ "repo": "openai/imagegpt-small",
"pkg": {
"0": {
- "transformers": "LukeModel"
+ "transformers": "ImageGPTModel"
}
- }
+ },
+ "tasks": [
+ "ImageGPTForCausalImageModeling",
+ "ImageGPTForImageClassification",
+ "ImageGPTModel",
+ "ImageGPTPreTrainedModel"
+ ]
}
},
- "info.art.lxmert-uncased": {
+ "info.stst.informer-tourism-monthly": {
"*": {
- "repo": "unc-nlp/lxmert-base-uncased",
+ "repo": "huggingface/informer-tourism-monthly",
"pkg": {
"0": {
- "transformers": "LxmertModel"
+ "transformers": "InformerModel"
}
- }
+ },
+ "tasks": [
+ "InformerForPrediction",
+ "InformerModel",
+ "InformerPreTrainedModel"
+ ]
}
},
- "info.stst.m": {
+ "info.vit.blip-flan-t5": {
"*": {
- "repo": "facebook/m2m100_418M",
+ "repo": "Salesforce/instructblip-flan-t5-xl",
+ "pkg": {
+ "0": {
+ "transformers": "InstructBlipModel"
+ }
+ },
+ "tasks": [
+ "InstructBlipQFormerModel",
+ "InstructBlipPreTrainedModel",
+ "InstructBlipModel",
+ "InstructBlipForConditionalGeneration",
+ "InstructBlipVisionModel"
+ ]
+ }
+ },
+ "info.vit.internvl3-hf": {
+ "*": {
+ "repo": "OpenGVLab/InternVL3-1B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "InternVLModel"
+ }
+ },
+ "tasks": [
+ "InternVLVisionPreTrainedModel",
+ "InternVLVisionModel",
+ "InternVLPreTrainedModel",
+ "InternVLModel",
+ "InternVLForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.jais-2-chat": {
+ "*": {
+ "repo": "inceptionai/Jais-2-8B-Chat",
+ "pkg": {
+ "0": {
+ "transformers": "Jais2Model"
+ }
+ },
+ "tasks": [
+ "Jais2Model",
+ "Jais2ForCausalLM",
+ "Jais2PreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.jamba-v0": {
+ "*": {
+ "repo": "ai21labs/Jamba-v0.1",
+ "pkg": {
+ "0": {
+ "transformers": "JambaModel"
+ }
+ },
+ "tasks": [
+ "JambaForCausalLM",
+ "JambaForSequenceClassification",
+ "JambaModel",
+ "JambaPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.janus": {
+ "*": {
+ "repo": "deepseek-community/Janus-Pro-1B",
+ "pkg": {
+ "0": {
+ "transformers": "JanusModel"
+ }
+ },
+ "tasks": [
+ "JanusPreTrainedModel",
+ "JanusForConditionalGeneration",
+ "JanusModel",
+ "JanusVQVAE",
+ "JanusVisionModel"
+ ]
+ }
+ },
+ "info.moe.jetmoe": {
+ "*": {
+ "repo": "jetmoe/jetmoe-8b",
+ "pkg": {
+ "0": {
+ "transformers": "JetMoeModel"
+ }
+ },
+ "tasks": [
+ "JetMoeForCausalLM",
+ "JetMoeModel",
+ "JetMoePreTrainedModel",
+ "JetMoeForSequenceClassification"
+ ]
+ }
+ },
+ "info.vit.kosmos-2-patch14-224": {
+ "*": {
+ "repo": "microsoft/kosmos-2-patch14-224",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2Model"
+ }
+ },
+ "tasks": [
+ "Kosmos2ForConditionalGeneration",
+ "Kosmos2Model",
+ "Kosmos2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.kosmos-2": {
+ "*": {
+ "repo": "microsoft/kosmos-2.5",
+ "pkg": {
+ "0": {
+ "transformers": "Kosmos2_5Model"
+ }
+ },
+ "tasks": [
+ "Kosmos2_5ForConditionalGeneration",
+ "Kosmos2_5Model",
+ "Kosmos2_5PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.stt-en-trfs": {
+ "*": {
+ "repo": "kyutai/stt-2.6b-en-trfs",
+ "pkg": {
+ "0": {
+ "transformers": "KyutaiSpeechToTextModel"
+ }
+ },
+ "tasks": [
+ "KyutaiSpeechToTextPreTrainedModel",
+ "KyutaiSpeechToTextModel",
+ "KyutaiSpeechToTextForConditionalGeneration"
+ ]
+ }
+ },
+ "info.aet.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrForCTC"
+ }
+ },
+ "tasks": [
+ "LasrForCTC",
+ "LasrEncoder",
+ "LasrPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.todo": {
+ "*": {
+ "repo": "TODO/TODO",
+ "pkg": {
+ "0": {
+ "transformers": "LasrEncoder"
+ }
+ },
+ "tasks": [
+ "LasrForCTC",
+ "LasrEncoder",
+ "LasrPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.layoutlm-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlm-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMModel"
+ }
+ },
+ "tasks": [
+ "LayoutLMForMaskedLM",
+ "LayoutLMForSequenceClassification",
+ "LayoutLMForTokenClassification",
+ "LayoutLMForQuestionAnswering",
+ "LayoutLMModel",
+ "LayoutLMPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.layoutlmv2-uncased": {
+ "*": {
+ "repo": "microsoft/layoutlmv2-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv2Model"
+ }
+ },
+ "tasks": [
+ "LayoutLMv2ForQuestionAnswering",
+ "LayoutLMv2ForSequenceClassification",
+ "LayoutLMv2ForTokenClassification",
+ "LayoutLMv2Layer",
+ "LayoutLMv2Model",
+ "LayoutLMv2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.layoutlmv3": {
+ "*": {
+ "repo": "microsoft/layoutlmv3-base",
+ "pkg": {
+ "0": {
+ "transformers": "LayoutLMv3Model"
+ }
+ },
+ "tasks": [
+ "LayoutLMv3ForQuestionAnswering",
+ "LayoutLMv3ForSequenceClassification",
+ "LayoutLMv3ForTokenClassification",
+ "LayoutLMv3Model",
+ "LayoutLMv3PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.led-16384": {
+ "*": {
+ "repo": "allenai/led-base-16384",
+ "pkg": {
+ "0": {
+ "transformers": "LEDModel"
+ }
+ },
+ "tasks": [
+ "LEDForConditionalGeneration",
+ "LEDForQuestionAnswering",
+ "LEDForSequenceClassification",
+ "LEDModel",
+ "LEDPreTrainedModel"
+ ]
+ }
+ },
+ "info.gan.levit-128s": {
+ "*": {
+ "repo": "facebook/levit-128S",
+ "pkg": {
+ "0": {
+ "transformers": "LevitModel"
+ }
+ },
+ "tasks": [
+ "LevitForImageClassification",
+ "LevitForImageClassificationWithTeacher",
+ "LevitModel",
+ "LevitPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.lfm": {
+ "*": {
+ "repo": "LiquidAI/LFM2-1.2B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2Model"
+ }
+ },
+ "tasks": [
+ "Lfm2ForCausalLM",
+ "Lfm2Model",
+ "Lfm2PreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.lfm2-a": {
+ "*": {
+ "repo": "LiquidAI/LFM2-8B-A1B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2MoeModel"
+ }
+ },
+ "tasks": [
+ "Lfm2MoeForCausalLM",
+ "Lfm2MoeModel",
+ "Lfm2MoePreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.lfm2-vl": {
+ "*": {
+ "repo": "LiquidAI/LFM2-VL-1.6B",
+ "pkg": {
+ "0": {
+ "transformers": "Lfm2VlModel"
+ }
+ },
+ "tasks": [
+ "Lfm2VlForConditionalGeneration",
+ "Lfm2VlPreTrainedModel",
+ "Lfm2VlModel"
+ ]
+ }
+ },
+ "info.aet.lightglue-superpoint": {
+ "*": {
+ "repo": "ETH-CVG/lightglue_superpoint",
+ "pkg": {
+ "0": {
+ "transformers": "LightGlueForKeypointMatching"
+ }
+ },
+ "tasks": [
+ "LightGluePreTrainedModel",
+ "LightGlueForKeypointMatching"
+ ]
+ }
+ },
+ "info.art.lilt-roberta-en": {
+ "*": {
+ "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
+ "pkg": {
+ "0": {
+ "transformers": "LiltModel"
+ }
+ },
+ "tasks": [
+ "LiltForQuestionAnswering",
+ "LiltForSequenceClassification",
+ "LiltForTokenClassification",
+ "LiltModel",
+ "LiltPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4ForConditionalGeneration"
+ }
+ },
+ "tasks": [
+ "Llama4PreTrainedModel",
+ "Llama4TextModel",
+ "Llama4VisionModel",
+ "Llama4ForCausalLM",
+ "Llama4ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.moe.llama-4-scout-16e": {
+ "*": {
+ "repo": "meta-llama/Llama-4-Scout-17B-16E",
+ "pkg": {
+ "0": {
+ "transformers": "Llama4TextModel"
+ }
+ },
+ "tasks": [
+ "Llama4PreTrainedModel",
+ "Llama4TextModel",
+ "Llama4VisionModel",
+ "Llama4ForCausalLM",
+ "Llama4ForConditionalGeneration"
+ ]
+ }
+ },
+ "info.vit.llava": {
+ "*": {
+ "repo": "llava-hf/llava-9b",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaModel"
+ }
+ },
+ "file_256": [
+ "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
+ ],
+ "layer_b3": [
+ "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
+ ],
+ "layer_256": [
+ "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
+ ],
+ "tasks": [
+ "LlavaForConditionalGeneration",
+ "LlavaPreTrainedModel",
+ "LlavaModel"
+ ]
+ }
+ },
+ "info.vit.llava-next-video-hf": {
+ "*": {
+ "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaNextVideoModel"
+ }
+ },
+ "tasks": [
+ "LlavaNextVideoForConditionalGeneration",
+ "LlavaNextVideoModel",
+ "LlavaNextVideoPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.llava-onevision-qwen2-ov-hf": {
+ "*": {
+ "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
+ "pkg": {
+ "0": {
+ "transformers": "LlavaOnevisionModel"
+ }
+ },
+ "tasks": [
+ "LlavaOnevisionModel",
+ "LlavaOnevisionForConditionalGeneration",
+ "LlavaOnevisionPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.longcat-flash-chat": {
+ "*": {
+ "repo": "meituan-longcat/LongCat-Flash-Chat",
+ "pkg": {
+ "0": {
+ "transformers": "LongcatFlashModel"
+ }
+ },
+ "tasks": [
+ "LongcatFlashPreTrainedModel",
+ "LongcatFlashModel",
+ "LongcatFlashForCausalLM"
+ ]
+ }
+ },
+ "info.art.longformer-4096": {
+ "*": {
+ "repo": "allenai/longformer-base-4096",
+ "pkg": {
+ "0": {
+ "transformers": "LongformerModel"
+ }
+ },
+ "tasks": [
+ "LongformerForMaskedLM",
+ "LongformerForMultipleChoice",
+ "LongformerForQuestionAnswering",
+ "LongformerForSequenceClassification",
+ "LongformerForTokenClassification",
+ "LongformerModel",
+ "LongformerPreTrainedModel",
+ "LongformerSelfAttention"
+ ]
+ }
+ },
+ "info.stst.long-t5-local": {
+ "*": {
+ "repo": "google/long-t5-local-base",
+ "pkg": {
+ "0": {
+ "transformers": "LongT5Model"
+ }
+ },
+ "tasks": [
+ "LongT5EncoderModel",
+ "LongT5ForConditionalGeneration",
+ "LongT5Model",
+ "LongT5PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.luke": {
+ "*": {
+ "repo": "studio-ousia/luke-base",
+ "pkg": {
+ "0": {
+ "transformers": "LukeModel"
+ }
+ },
+ "tasks": [
+ "LukeForEntityClassification",
+ "LukeForEntityPairClassification",
+ "LukeForEntitySpanClassification",
+ "LukeForMultipleChoice",
+ "LukeForQuestionAnswering",
+ "LukeForSequenceClassification",
+ "LukeForTokenClassification",
+ "LukeForMaskedLM",
+ "LukeModel",
+ "LukePreTrainedModel"
+ ]
+ }
+ },
+ "info.art.lxmert-uncased": {
+ "*": {
+ "repo": "unc-nlp/lxmert-base-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "LxmertModel"
+ }
+ },
+ "tasks": [
+ "LxmertEncoder",
+ "LxmertForPreTraining",
+ "LxmertForQuestionAnswering",
+ "LxmertModel",
+ "LxmertPreTrainedModel",
+ "LxmertVisualFeatureEncoder",
+ "LxmertXLayer"
+ ]
+ }
+ },
+ "info.stst.m": {
+ "*": {
+ "repo": "facebook/m2m100_418M",
+ "pkg": {
+ "0": {
+ "transformers": "M2M100Model"
+ }
+ },
+ "tasks": [
+ "M2M100ForConditionalGeneration",
+ "M2M100Model",
+ "M2M100PreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.mamba": {
+ "*": {
+ "repo": "state-spaces/mamba-2.8b",
+ "pkg": {
+ "0": {
+ "transformers": "MambaModel"
+ }
+ },
+ "tasks": [
+ "MambaForCausalLM",
+ "MambaModel",
+ "MambaPreTrainedModel",
+ "MambaCache"
+ ]
+ }
+ },
+ "info.ssm.mamba2": {
+ "*": {
+ "repo": "AntonV/mamba2-2.7b-hf",
+ "pkg": {
+ "0": {
+ "transformers": "Mamba2Model"
+ }
+ },
+ "tasks": [
+ "Mamba2ForCausalLM",
+ "Mamba2Model",
+ "Mamba2PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.opus-mt-en-de": {
+ "*": {
+ "repo": "Helsinki-NLP/opus-mt-en-de",
+ "pkg": {
+ "0": {
+ "transformers": "MarianModel"
+ }
+ },
+ "tasks": [
+ "MarianForCausalLM",
+ "MarianModel",
+ "MarianMTModel",
+ "MarianPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.markuplm": {
+ "*": {
+ "repo": "microsoft/markuplm-base",
+ "pkg": {
+ "0": {
+ "transformers": "MarkupLMModel"
+ }
+ },
+ "tasks": [
+ "MarkupLMForQuestionAnswering",
+ "MarkupLMForSequenceClassification",
+ "MarkupLMForTokenClassification",
+ "MarkupLMModel",
+ "MarkupLMPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.mask2former-swin-coco-instance": {
+ "*": {
+ "repo": "facebook/mask2former-swin-small-coco-instance",
+ "pkg": {
+ "0": {
+ "transformers": "Mask2FormerModel"
+ }
+ },
+ "tasks": [
+ "Mask2FormerForUniversalSegmentation",
+ "Mask2FormerModel",
+ "Mask2FormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.maskformer-swin-ade": {
+ "*": {
+ "repo": "facebook/maskformer-swin-base-ade",
+ "pkg": {
+ "0": {
+ "transformers": "MaskFormerModel"
+ }
+ },
+ "tasks": [
+ "MaskFormerForInstanceSegmentation",
+ "MaskFormerModel",
+ "MaskFormerPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.swin-patch4-window7-224": {
+ "*": {
+ "repo": "microsoft/swin-tiny-patch4-window7-224",
+ "pkg": {
+ "0": {
+ "transformers": "MaskFormerSwinModel"
+ }
+ },
+ "tasks": [
+ "MaskFormerSwinBackbone",
+ "MaskFormerSwinModel",
+ "MaskFormerSwinPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mbart-cc25": {
+ "*": {
+ "repo": "facebook/mbart-large-cc25",
+ "pkg": {
+ "0": {
+ "transformers": "MBartModel"
+ }
+ },
+ "tasks": [
+ "MBartForCausalLM",
+ "MBartForConditionalGeneration",
+ "MBartForQuestionAnswering",
+ "MBartForSequenceClassification",
+ "MBartModel",
+ "MBartPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.megatron-bert-uncased": {
+ "*": {
+ "repo": "nvidia/megatron-bert-uncased-345m",
+ "pkg": {
+ "0": {
+ "transformers": "MegatronBertModel"
+ }
+ },
+ "tasks": [
+ "MegatronBertForCausalLM",
+ "MegatronBertForMaskedLM",
+ "MegatronBertForMultipleChoice",
+ "MegatronBertForNextSentencePrediction",
+ "MegatronBertForPreTraining",
+ "MegatronBertForQuestionAnswering",
+ "MegatronBertForSequenceClassification",
+ "MegatronBertForTokenClassification",
+ "MegatronBertModel",
+ "MegatronBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.metaclip-2-worldwide-huge-quickgelu": {
+ "*": {
+ "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
+ "pkg": {
+ "0": {
+ "transformers": "MetaClip2Model"
+ }
+ },
+ "tasks": [
+ "MetaClip2Model",
+ "MetaClip2PreTrainedModel",
+ "MetaClip2TextModel",
+ "MetaClip2TextModelWithProjection",
+ "MetaClip2VisionModel",
+ "MetaClip2VisionModelWithProjection",
+ "MetaClip2ForImageClassification"
+ ]
+ }
+ },
+ "info.vit.mgp-str": {
+ "*": {
+ "repo": "alibaba-damo/mgp-str-base",
+ "pkg": {
+ "0": {
+ "transformers": "MgpstrForSceneTextRecognition"
+ }
+ },
+ "tasks": [
+ "MgpstrModel",
+ "MgpstrPreTrainedModel",
+ "MgpstrForSceneTextRecognition"
+ ]
+ }
+ },
+ "info.gan.mimi": {
+ "*": {
+ "repo": "kyutai/mimi",
+ "pkg": {
+ "0": {
+ "transformers": "MimiModel"
+ }
+ },
+ "tasks": [
+ "MimiModel",
+ "MimiPreTrainedModel"
+ ]
+ }
+ },
+ "info.moe.max-text-01-hf": {
+ "*": {
+ "repo": "MiniMaxAI/MiniMax-Text-01-hf",
+ "pkg": {
+ "0": {
+ "transformers": "MiniMaxModel"
+ }
+ },
+ "tasks": [
+ "MiniMaxPreTrainedModel",
+ "MiniMaxModel",
+ "MiniMaxForCausalLM",
+ "MiniMaxForSequenceClassification",
+ "MiniMaxForTokenClassification",
+ "MiniMaxForQuestionAnswering"
+ ]
+ }
+ },
+ "info.stst.stral-2410": {
+ "*": {
+ "repo": "mistralai/Ministral-8B-Instruct-2410",
+ "pkg": {
+ "0": {
+ "transformers": "MinistralModel"
+ }
+ },
+ "tasks": [
+ "MinistralPreTrainedModel",
+ "MinistralModel",
+ "MinistralForCausalLM",
+ "MinistralForSequenceClassification",
+ "MinistralForTokenClassification",
+ "MinistralForQuestionAnswering"
+ ]
+ }
+ },
+ "info.stst.stral-3-2512": {
+ "*": {
+ "repo": "mistralai/Ministral-3-8B-Base-2512",
+ "pkg": {
+ "0": {
+ "transformers": "Ministral3Model"
+ }
+ },
+ "tasks": [
+ "Ministral3ForCausalLM",
+ "Ministral3ForQuestionAnswering",
+ "Ministral3Model",
+ "Ministral3PreTrainedModel",
+ "Ministral3ForSequenceClassification",
+ "Ministral3ForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.mistral-v0": {
+ "*": {
+ "repo": "mistralai/Mistral-7B-v0.1",
"pkg": {
"0": {
- "transformers": "M2M100Model"
+ "transformers": "MistralModel"
}
- }
+ },
+ "tasks": [
+ "MistralForCausalLM",
+ "MistralForQuestionAnswering",
+ "MistralModel",
+ "MistralPreTrainedModel",
+ "MistralForSequenceClassification",
+ "MistralForTokenClassification"
+ ]
}
},
- "info.ssm.mamba": {
+ "info.vit.mistral-3-2503": {
"*": {
- "repo": "state-spaces/mamba-2.8b",
+ "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
"pkg": {
"0": {
- "transformers": "MambaModel"
+ "transformers": "Mistral3Model"
}
- }
+ },
+ "tasks": [
+ "Mistral3Model",
+ "Mistral3PreTrainedModel",
+ "Mistral3ForConditionalGeneration"
+ ]
}
},
- "info.ssm.mamba2": {
+ "info.moe.mixtral-8x": {
"*": {
- "repo": "AntonV/mamba2-2.7b-hf",
+ "repo": "mistralai/Mixtral-8x7B-v0.1",
"pkg": {
"0": {
- "transformers": "Mamba2Model"
+ "transformers": "MixtralModel"
}
- }
+ },
+ "tasks": [
+ "MixtralForCausalLM",
+ "MixtralForQuestionAnswering",
+ "MixtralModel",
+ "MixtralPreTrainedModel",
+ "MixtralForSequenceClassification",
+ "MixtralForTokenClassification"
+ ]
+ }
+ },
+ "info.vit.mlcd-vit-bigg-patch14-336": {
+ "*": {
+ "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
+ "pkg": {
+ "0": {
+ "transformers": "MLCDVisionModel"
+ }
+ },
+ "tasks": [
+ "MLCDPreTrainedModel",
+ "MLCDVisionModel"
+ ]
+ }
+ },
+ "info.vit.llama-3-vision": {
+ "*": {
+ "repo": "meta-llama/Llama-3.2-11B-Vision",
+ "pkg": {
+ "0": {
+ "transformers": "MllamaModel"
+ }
+ },
+ "tasks": [
+ "MllamaForConditionalGeneration",
+ "MllamaForCausalLM",
+ "MllamaTextModel",
+ "MllamaVisionModel",
+ "MllamaPreTrainedModel",
+ "MllamaModel"
+ ]
+ }
+ },
+ "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "*": {
+ "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "pkg": {
+ "0": {
+ "transformers": "MMGroundingDinoModel"
+ }
+ },
+ "tasks": [
+ "MMGroundingDinoForObjectDetection",
+ "MMGroundingDinoModel",
+ "MMGroundingDinoPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mobilebert-uncased": {
+ "*": {
+ "repo": "google/mobilebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "MobileBertModel"
+ }
+ },
+ "tasks": [
+ "MobileBertForMaskedLM",
+ "MobileBertForMultipleChoice",
+ "MobileBertForNextSentencePrediction",
+ "MobileBertForPreTraining",
+ "MobileBertForQuestionAnswering",
+ "MobileBertForSequenceClassification",
+ "MobileBertForTokenClassification",
+ "MobileBertLayer",
+ "MobileBertModel",
+ "MobileBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilenet-v1-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v1_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV1Model"
+ }
+ },
+ "tasks": [
+ "MobileNetV1ForImageClassification",
+ "MobileNetV1Model",
+ "MobileNetV1PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilenet-v2-1--224": {
+ "*": {
+ "repo": "google/mobilenet_v2_1.0_224",
+ "pkg": {
+ "0": {
+ "transformers": "MobileNetV2Model"
+ }
+ },
+ "tasks": [
+ "MobileNetV2ForImageClassification",
+ "MobileNetV2ForSemanticSegmentation",
+ "MobileNetV2Model",
+ "MobileNetV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilevit": {
+ "*": {
+ "repo": "apple/mobilevit-small",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTModel"
+ }
+ },
+ "tasks": [
+ "MobileViTForImageClassification",
+ "MobileViTForSemanticSegmentation",
+ "MobileViTModel",
+ "MobileViTPreTrainedModel"
+ ]
+ }
+ },
+ "info.vit.mobilevitv2-1": {
+ "*": {
+ "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "pkg": {
+ "0": {
+ "transformers": "MobileViTV2Model"
+ }
+ },
+ "tasks": [
+ "MobileViTV2ForImageClassification",
+ "MobileViTV2ForSemanticSegmentation",
+ "MobileViTV2Model",
+ "MobileViTV2PreTrainedModel"
+ ]
+ }
+ },
+ "info.aet.modernbert": {
+ "*": {
+ "repo": "answerdotai/ModernBERT-base",
+ "pkg": {
+ "0": {
+ "transformers": "ModernBertModel"
+ }
+ },
+ "tasks": [
+ "ModernBertModel",
+ "ModernBertPreTrainedModel",
+ "ModernBertForMaskedLM",
+ "ModernBertForSequenceClassification",
+ "ModernBertForTokenClassification",
+ "ModernBertForQuestionAnswering",
+ "ModernBertForMultipleChoice"
+ ]
+ }
+ },
+ "info.aet.test-dec": {
+ "*": {
+ "repo": "blab-jhu/test-32m-dec",
+ "pkg": {
+ "0": {
+ "transformers": "ModernBertDecoderModel"
+ }
+ },
+ "tasks": [
+ "ModernBertDecoderModel",
+ "ModernBertDecoderPreTrainedModel",
+ "ModernBertDecoderForCausalLM",
+ "ModernBertDecoderForSequenceClassification"
+ ]
+ }
+ },
+ "info.stst.moonshine": {
+ "*": {
+ "repo": "UsefulSensors/moonshine-tiny",
+ "pkg": {
+ "0": {
+ "transformers": "MoonshineModel"
+ }
+ },
+ "tasks": [
+ "MoonshineModel",
+ "MoonshinePreTrainedModel",
+ "MoonshineForConditionalGeneration"
+ ]
+ }
+ },
+ "info.stst.hf-moshiko": {
+ "*": {
+ "repo": "kmhf/hf-moshiko",
+ "pkg": {
+ "0": {
+ "transformers": "MoshiModel"
+ }
+ },
+ "tasks": [
+ "MoshiForCausalLM",
+ "MoshiForConditionalGeneration",
+ "MoshiModel",
+ "MoshiPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mpnet": {
+ "*": {
+ "repo": "microsoft/mpnet-base",
+ "pkg": {
+ "0": {
+ "transformers": "MPNetModel"
+ }
+ },
+ "tasks": [
+ "MPNetForMaskedLM",
+ "MPNetForMultipleChoice",
+ "MPNetForQuestionAnswering",
+ "MPNetForSequenceClassification",
+ "MPNetForTokenClassification",
+ "MPNetLayer",
+ "MPNetModel",
+ "MPNetPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.mpt": {
+ "*": {
+ "repo": "mosaicml/mpt-7b",
+ "pkg": {
+ "0": {
+ "transformers": "MptModel"
+ }
+ },
+ "tasks": [
+ "MptForCausalLM",
+ "MptModel",
+ "MptPreTrainedModel",
+ "MptForSequenceClassification",
+ "MptForTokenClassification",
+ "MptForQuestionAnswering"
+ ]
+ }
+ },
+ "info.art.mra-512-4": {
+ "*": {
+ "repo": "uw-madison/mra-base-512-4",
+ "pkg": {
+ "0": {
+ "transformers": "MraModel"
+ }
+ },
+ "tasks": [
+ "MraForMaskedLM",
+ "MraForMultipleChoice",
+ "MraForQuestionAnswering",
+ "MraForSequenceClassification",
+ "MraForTokenClassification",
+ "MraLayer",
+ "MraModel",
+ "MraPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mt5": {
+ "*": {
+ "repo": "google/mt5-small",
+ "pkg": {
+ "0": {
+ "transformers": "MT5Model"
+ }
+ },
+ "identifiers": [
+ [
+ 250112,
+ 2048
+ ],
+ "text_encoders.mt5xl.transformer.shared.weight"
+ ],
+ "file_256": [
+ "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
+ "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
+ ],
+ "layer_b3": [
+ "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51"
+ ],
+ "layer_256": [
+ "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
+ "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
+ ],
+ "tasks": [
+ "MT5EncoderModel",
+ "MT5ForConditionalGeneration",
+ "MT5ForQuestionAnswering",
+ "MT5ForSequenceClassification",
+ "MT5ForTokenClassification",
+ "MT5Model",
+ "MT5PreTrainedModel"
+ ]
+ }
+ },
+ "info.art.musicgen": {
+ "*": {
+ "repo": "facebook/musicgen-small",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenModel"
+ }
+ },
+ "tasks": [
+ "MusicgenForConditionalGeneration",
+ "MusicgenForCausalLM",
+ "MusicgenModel",
+ "MusicgenPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.musicgen-melody": {
+ "*": {
+ "repo": "facebook/musicgen-melody",
+ "pkg": {
+ "0": {
+ "transformers": "MusicgenMelodyModel"
+ }
+ },
+ "tasks": [
+ "MusicgenMelodyForConditionalGeneration",
+ "MusicgenMelodyForCausalLM",
+ "MusicgenMelodyModel",
+ "MusicgenMelodyPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.mvp": {
+ "*": {
+ "repo": "RUCAIBox/mvp",
+ "pkg": {
+ "0": {
+ "transformers": "MvpModel"
+ }
+ },
+ "tasks": [
+ "MvpForCausalLM",
+ "MvpForConditionalGeneration",
+ "MvpForQuestionAnswering",
+ "MvpForSequenceClassification",
+ "MvpModel",
+ "MvpPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.nanochat-d32": {
+ "*": {
+ "repo": "karpathy/nanochat-d32",
+ "pkg": {
+ "0": {
+ "transformers": "NanoChatModel"
+ }
+ },
+ "tasks": [
+ "NanoChatPreTrainedModel",
+ "NanoChatModel",
+ "NanoChatForCausalLM"
+ ]
+ }
+ },
+ "info.stst.nemotron-3-hf": {
+ "*": {
+ "repo": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
+ "pkg": {
+ "0": {
+ "transformers": "NemotronModel"
+ }
+ },
+ "tasks": [
+ "NemotronForQuestionAnswering",
+ "NemotronForCausalLM",
+ "NemotronModel",
+ "NemotronPreTrainedModel",
+ "NemotronForSequenceClassification",
+ "NemotronForTokenClassification"
+ ]
+ }
+ },
+ "info.moe.nllb-moe": {
+ "*": {
+ "repo": "facebook/nllb-moe-54b",
+ "pkg": {
+ "0": {
+ "transformers": "NllbMoeModel"
+ }
+ },
+ "tasks": [
+ "NllbMoeForConditionalGeneration",
+ "NllbMoeModel",
+ "NllbMoePreTrainedModel",
+ "NllbMoeTop2Router",
+ "NllbMoeSparseMLP"
+ ]
}
},
- "info.stst.opus-mt-en-de": {
+ "info.art.nystromformer-512": {
"*": {
- "repo": "Helsinki-NLP/opus-mt-en-de",
+ "repo": "uw-madison/nystromformer-512",
"pkg": {
"0": {
- "transformers": "MarianModel"
+ "transformers": "NystromformerModel"
}
- }
+ },
+ "tasks": [
+ "NystromformerForMaskedLM",
+ "NystromformerForMultipleChoice",
+ "NystromformerForQuestionAnswering",
+ "NystromformerForSequenceClassification",
+ "NystromformerForTokenClassification",
+ "NystromformerLayer",
+ "NystromformerModel",
+ "NystromformerPreTrainedModel"
+ ]
}
},
- "info.art.markuplm": {
+ "info.stst.olmo-hf": {
"*": {
- "repo": "microsoft/markuplm-base",
+ "repo": "allenai/OLMo-7B-hf",
"pkg": {
"0": {
- "transformers": "MarkupLMModel"
+ "transformers": "OlmoModel"
}
- }
+ },
+ "tasks": [
+ "OlmoForCausalLM",
+ "OlmoModel",
+ "OlmoPreTrainedModel"
+ ]
}
},
- "info.vit.swin-patch4-window7-224": {
+ "info.stst.olmo2-1124-hf": {
"*": {
- "repo": "microsoft/swin-tiny-patch4-window7-224",
+ "repo": "allenai/Olmo-2-1124-7B",
"pkg": {
"0": {
- "transformers": "MaskFormerSwinModel"
+ "transformers": "Olmo2Model"
}
- }
+ },
+ "tasks": [
+ "Olmo2ForCausalLM",
+ "Olmo2Model",
+ "Olmo2PreTrainedModel"
+ ]
}
},
- "info.stst.mbart-cc25": {
+ "info.stst.olmo-3-0725": {
"*": {
- "repo": "facebook/mbart-large-cc25",
+ "repo": "allenai/OLMo-3-0725-1B",
"pkg": {
"0": {
- "transformers": "MBartModel"
+ "transformers": "Olmo3Model"
}
- }
+ },
+ "tasks": [
+ "Olmo3ForCausalLM",
+ "Olmo3Model",
+ "Olmo3PreTrainedModel"
+ ]
}
},
- "info.art.megatron-bert-uncased": {
+ "info.moe.olmoe-0924": {
"*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
+ "repo": "allenai/OLMoE-1B-7B-0924",
"pkg": {
"0": {
- "transformers": "MegatronBertModel"
+ "transformers": "OlmoeModel"
}
- }
+ },
+ "tasks": [
+ "OlmoeForCausalLM",
+ "OlmoeModel",
+ "OlmoePreTrainedModel"
+ ]
}
},
- "info.vit.metaclip-2-worldwide-huge-quickgelu": {
+ "info.detr.omdet-turbo-swin-hf": {
"*": {
- "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
+ "repo": "omlab/omdet-turbo-swin-tiny-hf",
"pkg": {
"0": {
- "transformers": "MetaClip2Model"
+ "transformers": "OmDetTurboForObjectDetection"
}
- }
+ },
+ "tasks": [
+ "OmDetTurboForObjectDetection",
+ "OmDetTurboPreTrainedModel"
+ ]
}
},
- "info.vit.mgp-str": {
+ "info.detr.oneformer-ade-swin": {
"*": {
- "repo": "alibaba-damo/mgp-str-base",
+ "repo": "shi-labs/oneformer_ade20k_swin_tiny",
"pkg": {
"0": {
- "transformers": "MgpstrForSceneTextRecognition"
+ "transformers": "OneFormerModel"
}
- }
+ },
+ "tasks": [
+ "OneFormerForUniversalSegmentation",
+ "OneFormerModel",
+ "OneFormerPreTrainedModel"
+ ]
}
},
- "info.vit.mistral-3-2503": {
+ "info.art.openai-gpt": {
"*": {
- "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
+ "repo": "openai-community/openai-gpt",
"pkg": {
"0": {
- "transformers": "Mistral3Model"
+ "transformers": "OpenAIGPTModel"
}
- }
+ },
+ "tasks": [
+ "OpenAIGPTDoubleHeadsModel",
+ "OpenAIGPTForSequenceClassification",
+ "OpenAIGPTLMHeadModel",
+ "OpenAIGPTModel",
+ "OpenAIGPTPreTrainedModel"
+ ]
}
},
- "info.vit.mlcd-vit-bigg-patch14-336": {
+ "info.art.opt": {
"*": {
- "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
+ "repo": "facebook/opt-350m",
"pkg": {
"0": {
- "transformers": "MLCDVisionModel"
+ "transformers": "OPTModel"
}
- }
+ },
+ "tasks": [
+ "OPTForCausalLM",
+ "OPTModel",
+ "OPTPreTrainedModel",
+ "OPTForSequenceClassification",
+ "OPTForQuestionAnswering"
+ ]
}
},
- "info.vit.llama-3-vision": {
+ "info.vit.ovis2-hf": {
"*": {
- "repo": "meta-llama/Llama-3.2-11B-Vision",
+ "repo": "thisisiron/Ovis2-1B-hf",
"pkg": {
"0": {
- "transformers": "MllamaModel"
+ "transformers": "Ovis2Model"
}
- }
+ },
+ "tasks": [
+ "Ovis2PreTrainedModel",
+ "Ovis2Model",
+ "Ovis2ForConditionalGeneration"
+ ]
}
},
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
+ "info.vit.owlv2-patch16": {
"*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
+ "repo": "google/owlv2-base-patch16",
"pkg": {
"0": {
- "transformers": "MMGroundingDinoModel"
+ "transformers": "Owlv2Model"
}
- }
+ },
+ "tasks": [
+ "Owlv2Model",
+ "Owlv2PreTrainedModel",
+ "Owlv2TextModel",
+ "Owlv2VisionModel",
+ "Owlv2ForObjectDetection"
+ ]
}
},
- "info.art.mobilebert-uncased": {
+ "info.vit.owlvit-patch32": {
"*": {
- "repo": "google/mobilebert-uncased",
+ "repo": "google/owlvit-base-patch32",
"pkg": {
"0": {
- "transformers": "MobileBertModel"
+ "transformers": "OwlViTModel"
}
- }
+ },
+ "tasks": [
+ "OwlViTModel",
+ "OwlViTPreTrainedModel",
+ "OwlViTTextModel",
+ "OwlViTVisionModel",
+ "OwlViTForObjectDetection"
+ ]
}
},
- "info.vit.mobilenet-v1-1--224": {
+ "info.vit.paligemma": {
"*": {
- "repo": "google/mobilenet_v1_1.0_224",
+ "repo": "google/paligemma2-3b-mix-224",
"pkg": {
"0": {
- "transformers": "MobileNetV1Model"
+ "transformers": "PaliGemmaModel"
}
- }
+ },
+ "tasks": [
+ "PaliGemmaForConditionalGeneration",
+ "PaliGemmaPreTrainedModel",
+ "PaliGemmaModel"
+ ]
}
},
- "info.vit.mobilenet-v2-1--224": {
+ "info.aet.parakeet-ctc-b": {
"*": {
- "repo": "google/mobilenet_v2_1.0_224",
+ "repo": "nvidia/parakeet-ctc-1.1b",
"pkg": {
"0": {
- "transformers": "MobileNetV2Model"
+ "transformers": "ParakeetForCTC"
}
- }
+ },
+ "tasks": [
+ "ParakeetForCTC",
+ "ParakeetEncoder",
+ "ParakeetPreTrainedModel"
+ ]
}
},
- "info.vit.mobilevit": {
+ "info.stst.parakeet-ctc-b": {
"*": {
- "repo": "apple/mobilevit-small",
+ "repo": "nvidia/parakeet-ctc-1.1b",
"pkg": {
"0": {
- "transformers": "MobileViTModel"
+ "transformers": "ParakeetEncoder"
}
- }
+ },
+ "tasks": [
+ "ParakeetForCTC",
+ "ParakeetEncoder",
+ "ParakeetPreTrainedModel"
+ ]
}
},
- "info.vit.mobilevitv2-1": {
+ "info.mlp.patchtsmixer-etth1-pretrain": {
"*": {
- "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "repo": "ibm/patchtsmixer-etth1-pretrain",
"pkg": {
"0": {
- "transformers": "MobileViTV2Model"
+ "transformers": "PatchTSMixerModel"
}
- }
+ },
+ "tasks": [
+ "PatchTSMixerPreTrainedModel",
+ "PatchTSMixerModel",
+ "PatchTSMixerForPretraining",
+ "PatchTSMixerForPrediction",
+ "PatchTSMixerForTimeSeriesClassification",
+ "PatchTSMixerForRegression"
+ ]
}
},
- "info.aet.modernbert": {
+ "info.art.patchtst": {
"*": {
- "repo": "answerdotai/ModernBERT-base",
+ "repo": "ibm/patchtst",
"pkg": {
"0": {
- "transformers": "ModernBertModel"
+ "transformers": "PatchTSTModel"
}
- }
+ },
+ "tasks": [
+ "PatchTSTModel",
+ "PatchTSTPreTrainedModel",
+ "PatchTSTForPrediction",
+ "PatchTSTForPretraining",
+ "PatchTSTForRegression",
+ "PatchTSTForClassification"
+ ]
}
},
- "info.art.mpnet": {
+ "info.stst.pe-av": {
"*": {
- "repo": "microsoft/mpnet-base",
+ "repo": "facebook/pe-av-large",
"pkg": {
"0": {
- "transformers": "MPNetModel"
+ "transformers": "PeAudioModel"
}
- }
+ },
+ "tasks": [
+ "PeAudioFrameLevelModel",
+ "PeAudioModel",
+ "PeAudioEncoder"
+ ]
}
},
- "info.art.mpt": {
+ "info.aet.pe-av": {
"*": {
- "repo": "mosaicml/mpt-7b",
+ "repo": "facebook/pe-av-large",
"pkg": {
"0": {
- "transformers": "MptModel"
+ "transformers": "PeAudioVideoModel"
}
- }
+ },
+ "tasks": [
+ "PeAudioVideoModel",
+ "PeAudioVideoEncoder"
+ ]
}
},
- "info.art.mra-512-4": {
+ "info.vit.pe-av": {
"*": {
- "repo": "uw-madison/mra-base-512-4",
+ "repo": "facebook/pe-av-large",
"pkg": {
"0": {
- "transformers": "MraModel"
+ "transformers": "PeVideoEncoder"
}
- }
+ },
+ "tasks": [
+ "PeVideoEncoder",
+ "PeVideoModel"
+ ]
}
},
- "info.stst.mt5": {
+ "info.stst.pegasus": {
"*": {
- "repo": "google/mt5-small",
+ "repo": "google/pegasus-large",
"pkg": {
"0": {
- "transformers": "MT5Model"
+ "transformers": "PegasusModel"
}
- }
+ },
+ "tasks": [
+ "PegasusForCausalLM",
+ "PegasusForConditionalGeneration",
+ "PegasusModel",
+ "PegasusPreTrainedModel"
+ ]
}
},
- "info.art.musicgen": {
+ "info.stst.pegasus-x": {
"*": {
- "repo": "facebook/musicgen-small",
+ "repo": "google/pegasus-x-large",
"pkg": {
"0": {
- "transformers": "MusicgenModel"
+ "transformers": "PegasusXModel"
}
- }
+ },
+ "tasks": [
+ "PegasusXForConditionalGeneration",
+ "PegasusXModel",
+ "PegasusXPreTrainedModel"
+ ]
}
},
- "info.art.musicgen-melody": {
+ "info.vit.language-perceiver": {
"*": {
- "repo": "facebook/musicgen-melody",
+ "repo": "deepmind/language-perceiver",
"pkg": {
"0": {
- "transformers": "MusicgenMelodyModel"
+ "transformers": "PerceiverModel"
}
- }
+ },
+ "tasks": [
+ "PerceiverForImageClassificationConvProcessing",
+ "PerceiverForImageClassificationFourier",
+ "PerceiverForImageClassificationLearned",
+ "PerceiverForMaskedLM",
+ "PerceiverForMultimodalAutoencoding",
+ "PerceiverForOpticalFlow",
+ "PerceiverForSequenceClassification",
+ "PerceiverLayer",
+ "PerceiverModel",
+ "PerceiverPreTrainedModel"
+ ]
}
},
- "info.stst.mvp": {
+ "info.vit.perception-lm": {
"*": {
- "repo": "RUCAIBox/mvp",
+ "repo": "facebook/Perception-LM-1B",
"pkg": {
"0": {
- "transformers": "MvpModel"
+ "transformers": "PerceptionLMModel"
}
- }
+ },
+ "tasks": [
+ "PerceptionLMForConditionalGeneration",
+ "PerceptionLMPreTrainedModel",
+ "PerceptionLMModel"
+ ]
}
},
- "info.stst.nllb-moe": {
+ "info.stst.persimmon": {
"*": {
- "repo": "facebook/nllb-moe-54b",
+ "repo": "adept/persimmon-8b-base",
"pkg": {
"0": {
- "transformers": "NllbMoeModel"
+ "transformers": "PersimmonModel"
}
- }
+ },
+ "tasks": [
+ "PersimmonForCausalLM",
+ "PersimmonModel",
+ "PersimmonPreTrainedModel",
+ "PersimmonForSequenceClassification",
+ "PersimmonForTokenClassification"
+ ]
}
},
- "info.art.nystromformer-512": {
+ "info.stst.phi-1": {
"*": {
- "repo": "uw-madison/nystromformer-512",
+ "repo": "microsoft/phi-1",
"pkg": {
"0": {
- "transformers": "NystromformerModel"
+ "transformers": "PhiModel"
}
- }
+ },
+ "tasks": [
+ "PhiPreTrainedModel",
+ "PhiModel",
+ "PhiForCausalLM",
+ "PhiForSequenceClassification",
+ "PhiForTokenClassification"
+ ]
}
},
- "info.detr.omdet-turbo-swin-hf": {
+ "info.stst.phi-3": {
"*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
+ "repo": "microsoft/Phi-3-mini-4k-instruct",
"pkg": {
"0": {
- "transformers": "OmDetTurboForObjectDetection"
+ "transformers": "Phi3Model"
}
- }
+ },
+ "tasks": [
+ "Phi3PreTrainedModel",
+ "Phi3Model",
+ "Phi3ForCausalLM",
+ "Phi3ForSequenceClassification",
+ "Phi3ForTokenClassification"
+ ]
}
},
- "info.art.openai-gpt": {
+ "info.vit.phi-4": {
"*": {
- "repo": "openai-community/openai-gpt",
+ "repo": "microsoft/Phi-4-multimodal-instruct",
"pkg": {
"0": {
- "transformers": "OpenAIGPTModel"
+ "transformers": "Phi4MultimodalModel"
}
- }
+ },
+ "file_256": [
+ "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
+ ],
+ "layer_b3": [
+ "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
+ ],
+ "layer_256": [
+ "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
+ ],
+ "tasks": [
+ "Phi4MultimodalAudioPreTrainedModel",
+ "Phi4MultimodalAudioModel",
+ "Phi4MultimodalVisionPreTrainedModel",
+ "Phi4MultimodalVisionModel",
+ "Phi4MultimodalPreTrainedModel",
+ "Phi4MultimodalModel",
+ "Phi4MultimodalForCausalLM"
+ ]
}
},
- "info.art.opt": {
+ "info.moe.phi-3-moe": {
"*": {
- "repo": "facebook/opt-350m",
+ "repo": "microsoft/Phi-3.5-MoE-instruct",
"pkg": {
"0": {
- "transformers": "OPTModel"
+ "transformers": "PhimoeModel"
}
- }
+ },
+ "tasks": [
+ "PhimoePreTrainedModel",
+ "PhimoeModel",
+ "PhimoeForCausalLM",
+ "PhimoeForSequenceClassification"
+ ]
}
},
- "info.vit.ovis2-hf": {
+ "info.vit.pixio-huge": {
"*": {
- "repo": "thisisiron/Ovis2-1B-hf",
+ "repo": "facebook/pixio-huge",
"pkg": {
"0": {
- "transformers": "Ovis2Model"
+ "transformers": "PixioModel"
}
- }
+ },
+ "tasks": [
+ "PixioModel",
+ "PixioPreTrainedModel",
+ "PixioBackbone"
+ ]
}
},
- "info.vit.owlv2-patch16": {
+ "info.vit.pixtral": {
"*": {
- "repo": "google/owlv2-base-patch16",
+ "repo": "mistralai/Pixtral-12B-Base-2409",
"pkg": {
"0": {
- "transformers": "Owlv2Model"
+ "transformers": "PixtralVisionModel"
}
- }
+ },
+ "tasks": [
+ "PixtralVisionModel",
+ "PixtralPreTrainedModel"
+ ]
}
},
- "info.vit.owlvit-patch32": {
+ "info.stst.plbart": {
"*": {
- "repo": "google/owlvit-base-patch32",
+ "repo": "uclanlp/plbart-base",
"pkg": {
"0": {
- "transformers": "OwlViTModel"
+ "transformers": "PLBartModel"
}
- }
+ },
+ "tasks": [
+ "PLBartForCausalLM",
+ "PLBartForConditionalGeneration",
+ "PLBartForSequenceClassification",
+ "PLBartModel",
+ "PLBartPreTrainedModel"
+ ]
}
},
- "info.vit.paligemma": {
+ "info.vit.poolformer-s12": {
"*": {
- "repo": "google/paligemma2-3b-mix-224",
+ "repo": "sail/poolformer_s12",
"pkg": {
"0": {
- "transformers": "PaliGemmaModel"
+ "transformers": "PoolFormerModel"
}
- }
+ },
+ "tasks": [
+ "PoolFormerForImageClassification",
+ "PoolFormerModel",
+ "PoolFormerPreTrainedModel"
+ ]
}
},
- "info.aet.parakeet-ctc-b": {
+ "info.stst.phetnet-uncased": {
"*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
+ "repo": "microsoft/prophetnet-large-uncased",
"pkg": {
"0": {
- "transformers": "ParakeetForCTC"
+ "transformers": "ProphetNetModel"
}
- }
+ },
+ "tasks": [
+ "ProphetNetDecoder",
+ "ProphetNetEncoder",
+ "ProphetNetForCausalLM",
+ "ProphetNetForConditionalGeneration",
+ "ProphetNetModel",
+ "ProphetNetPreTrainedModel"
+ ]
}
},
- "info.stst.parakeet-ctc-b": {
+ "info.vit.pvt-224": {
"*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
+ "repo": "Xrenya/pvt-tiny-224",
"pkg": {
"0": {
- "transformers": "ParakeetEncoder"
+ "transformers": "PvtModel"
}
- }
+ },
+ "tasks": [
+ "PvtForImageClassification",
+ "PvtModel",
+ "PvtPreTrainedModel"
+ ]
}
},
- "info.stst.pe-av": {
+ "info.vit.pvt-v2-b0": {
"*": {
- "repo": "facebook/pe-av-large",
+ "repo": "OpenGVLab/pvt_v2_b0",
"pkg": {
"0": {
- "transformers": "PeAudioModel"
+ "transformers": "PvtV2Model"
}
- }
+ },
+ "tasks": [
+ "PvtV2ForImageClassification",
+ "PvtV2Model",
+ "PvtV2PreTrainedModel",
+ "PvtV2Backbone"
+ ]
}
},
- "info.aet.pe-av": {
+ "info.stst.qwen2": {
"*": {
- "repo": "facebook/pe-av-large",
+ "repo": "Qwen/Qwen2-7B",
"pkg": {
"0": {
- "transformers": "PeAudioVideoModel"
+ "transformers": "Qwen2Model"
}
- }
+ },
+ "tasks": [
+ "Qwen2PreTrainedModel",
+ "Qwen2Model",
+ "Qwen2ForCausalLM",
+ "Qwen2RMSNorm",
+ "Qwen2ForSequenceClassification",
+ "Qwen2ForTokenClassification",
+ "Qwen2ForQuestionAnswering"
+ ]
}
},
- "info.stst.pegasus": {
+ "info.vit.qwen2-vl": {
"*": {
- "repo": "google/pegasus-large",
+ "repo": "Qwen/Qwen2-VL-7B-Instruct",
"pkg": {
"0": {
- "transformers": "PegasusModel"
+ "transformers": "Qwen2_5_VLModel"
}
- }
+ },
+ "tasks": [
+ "Qwen2_5_VLForConditionalGeneration",
+ "Qwen2_5_VLModel",
+ "Qwen2_5_VLPreTrainedModel",
+ "Qwen2_5_VLTextModel"
+ ]
}
},
- "info.stst.pegasus-x": {
+ "info.stst.qwen2-vl": {
"*": {
- "repo": "google/pegasus-x-large",
+ "repo": "Qwen/Qwen2-VL-7B-Instruct",
"pkg": {
"0": {
- "transformers": "PegasusXModel"
+ "transformers": "Qwen2_5_VLTextModel"
}
- }
+ },
+ "file_256": [
+ "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
+ "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
+ "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
+ "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
+ ],
+ "layer_b3": [
+ "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
+ "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
+ "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
+ "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
+ ],
+ "layer_256": [
+ "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
+ "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
+ "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
+ "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
+ ],
+ "tasks": [
+ "Qwen2_5_VLForConditionalGeneration",
+ "Qwen2_5_VLModel",
+ "Qwen2_5_VLPreTrainedModel",
+ "Qwen2_5_VLTextModel"
+ ]
}
},
- "info.vit.language-perceiver": {
+ "info.aet.qwen2-audio": {
"*": {
- "repo": "deepmind/language-perceiver",
+ "repo": "Qwen/Qwen2-Audio-7B",
"pkg": {
"0": {
- "transformers": "PerceiverModel"
+ "transformers": "Qwen2AudioEncoder"
}
- }
+ },
+ "tasks": [
+ "Qwen2AudioForConditionalGeneration",
+ "Qwen2AudioPreTrainedModel",
+ "Qwen2AudioEncoder"
+ ]
}
},
- "info.vit.perception-lm": {
+ "info.moe.qwen15-moe-a": {
"*": {
- "repo": "facebook/Perception-LM-1B",
+ "repo": "Qwen/Qwen1.5-MoE-A2.7B",
"pkg": {
"0": {
- "transformers": "PerceptionLMModel"
+ "transformers": "Qwen2MoeModel"
}
- }
+ },
+ "tasks": [
+ "Qwen2MoeForCausalLM",
+ "Qwen2MoeForQuestionAnswering",
+ "Qwen2MoeModel",
+ "Qwen2MoePreTrainedModel",
+ "Qwen2MoeForSequenceClassification",
+ "Qwen2MoeForTokenClassification"
+ ]
}
},
- "info.vit.pixio-huge": {
+ "info.stst.qwen3": {
"*": {
- "repo": "facebook/pixio-huge",
+ "repo": "Qwen/Qwen3-8B",
"pkg": {
"0": {
- "transformers": "PixioModel"
+ "transformers": "Qwen3Model"
}
- }
+ },
+ "tasks": [
+ "Qwen3ForCausalLM",
+ "Qwen3ForQuestionAnswering",
+ "Qwen3PreTrainedModel",
+ "Qwen3Model",
+ "Qwen3ForSequenceClassification",
+ "Qwen3ForTokenClassification"
+ ]
}
},
- "info.stst.plbart": {
+ "info.moe.qwen3-a": {
"*": {
- "repo": "uclanlp/plbart-base",
+ "repo": "Qwen/Qwen3-30B-A3B",
"pkg": {
"0": {
- "transformers": "PLBartModel"
+ "transformers": "Qwen3MoeModel"
}
- }
+ },
+ "file_256": [
+ "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
+ ],
+ "layer_b3": [
+ "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
+ ],
+ "layer_256": [
+ "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
+ ],
+ "tasks": [
+ "Qwen3MoeForCausalLM",
+ "Qwen3MoeForQuestionAnswering",
+ "Qwen3MoeModel",
+ "Qwen3MoePreTrainedModel",
+ "Qwen3MoeForSequenceClassification",
+ "Qwen3MoeForTokenClassification"
+ ]
}
},
- "info.vit.poolformer-s12": {
+ "info.moe.qwen3-next-a": {
"*": {
- "repo": "sail/poolformer_s12",
+ "repo": "Qwen/Qwen3-Next-80B-A3B-Instruct",
"pkg": {
"0": {
- "transformers": "PoolFormerModel"
+ "transformers": "Qwen3NextModel"
}
- }
+ },
+ "tasks": [
+ "Qwen3NextForCausalLM",
+ "Qwen3NextForQuestionAnswering",
+ "Qwen3NextModel",
+ "Qwen3NextPreTrainedModel",
+ "Qwen3NextForSequenceClassification",
+ "Qwen3NextForTokenClassification"
+ ]
}
},
- "info.vit.pvt-v2-b0": {
+ "info.vit.qwen3-vl": {
"*": {
- "repo": "OpenGVLab/pvt_v2_b0",
+ "repo": "Qwen/Qwen3-VL-4B-Instruct",
"pkg": {
"0": {
- "transformers": "PvtV2Model"
+ "transformers": "Qwen3VLModel"
}
- }
+ },
+ "tasks": [
+ "Qwen3VLVisionModel",
+ "Qwen3VLForConditionalGeneration",
+ "Qwen3VLModel",
+ "Qwen3VLPreTrainedModel",
+ "Qwen3VLTextModel"
+ ]
}
},
- "info.vit.qwen2-vl": {
+ "info.vit.qwen3-vl-a": {
"*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
+ "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
"pkg": {
"0": {
- "transformers": "Qwen2_5_VLModel"
+ "transformers": "Qwen3VLMoeModel"
}
- }
+ },
+ "tasks": [
+ "Qwen3VLMoeVisionModel",
+ "Qwen3VLMoeForConditionalGeneration",
+ "Qwen3VLMoeModel",
+ "Qwen3VLMoePreTrainedModel",
+ "Qwen3VLMoeTextModel"
+ ]
}
},
- "info.aet.qwen2-audio": {
+ "info.moe.qwen3-vl-a": {
"*": {
- "repo": "Qwen/Qwen2-Audio-7B",
+ "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
"pkg": {
"0": {
- "transformers": "Qwen2AudioEncoder"
+ "transformers": "Qwen3VLMoeTextModel"
}
- }
+ },
+ "tasks": [
+ "Qwen3VLMoeVisionModel",
+ "Qwen3VLMoeForConditionalGeneration",
+ "Qwen3VLMoeModel",
+ "Qwen3VLMoePreTrainedModel",
+ "Qwen3VLMoeTextModel"
+ ]
}
},
- "info.vit.qwen3-vl": {
+ "info.stst.qwen3-vl": {
"*": {
"repo": "Qwen/Qwen3-VL-4B-Instruct",
"pkg": {
"0": {
- "transformers": "Qwen3VLModel"
+ "transformers": "Qwen3VLTextModel"
}
- }
+ },
+ "tasks": [
+ "Qwen3VLVisionModel",
+ "Qwen3VLForConditionalGeneration",
+ "Qwen3VLModel",
+ "Qwen3VLPreTrainedModel",
+ "Qwen3VLTextModel"
+ ]
}
},
- "info.vit.qwen3-vl-a": {
+ "info.rnn.recurrentgemma": {
"*": {
- "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
+ "repo": "google/recurrentgemma-2b",
"pkg": {
"0": {
- "transformers": "Qwen3VLMoeModel"
+ "transformers": "RecurrentGemmaModel"
}
- }
+ },
+ "tasks": [
+ "RecurrentGemmaForCausalLM",
+ "RecurrentGemmaModel",
+ "RecurrentGemmaPreTrainedModel"
+ ]
}
},
"info.art.reformer-crime-and-punishment": {
@@ -4364,7 +10724,17 @@
"0": {
"transformers": "ReformerModel"
}
- }
+ },
+ "tasks": [
+ "ReformerAttention",
+ "ReformerForMaskedLM",
+ "ReformerForQuestionAnswering",
+ "ReformerForSequenceClassification",
+ "ReformerLayer",
+ "ReformerModel",
+ "ReformerModelWithLMHead",
+ "ReformerPreTrainedModel"
+ ]
}
},
"info.vit.regnet-y-040": {
@@ -4374,7 +10744,12 @@
"0": {
"transformers": "RegNetModel"
}
- }
+ },
+ "tasks": [
+ "RegNetForImageClassification",
+ "RegNetModel",
+ "RegNetPreTrainedModel"
+ ]
}
},
"info.art.rembert": {
@@ -4384,7 +10759,18 @@
"0": {
"transformers": "RemBertModel"
}
- }
+ },
+ "tasks": [
+ "RemBertForCausalLM",
+ "RemBertForMaskedLM",
+ "RemBertForMultipleChoice",
+ "RemBertForQuestionAnswering",
+ "RemBertForSequenceClassification",
+ "RemBertForTokenClassification",
+ "RemBertLayer",
+ "RemBertModel",
+ "RemBertPreTrainedModel"
+ ]
}
},
"info.vit.resnet-50": {
@@ -4394,7 +10780,13 @@
"0": {
"transformers": "ResNetModel"
}
- }
+ },
+ "tasks": [
+ "ResNetForImageClassification",
+ "ResNetModel",
+ "ResNetPreTrainedModel",
+ "ResNetBackbone"
+ ]
}
},
"info.art.roberta": {
@@ -4404,7 +10796,17 @@
"0": {
"transformers": "RobertaModel"
}
- }
+ },
+ "tasks": [
+ "RobertaForCausalLM",
+ "RobertaForMaskedLM",
+ "RobertaForMultipleChoice",
+ "RobertaForQuestionAnswering",
+ "RobertaForSequenceClassification",
+ "RobertaForTokenClassification",
+ "RobertaModel",
+ "RobertaPreTrainedModel"
+ ]
}
},
"info.art.efficient-mlm-m0-0": {
@@ -4414,7 +10816,17 @@
"0": {
"transformers": "RobertaPreLayerNormModel"
}
- }
+ },
+ "tasks": [
+ "RobertaPreLayerNormForCausalLM",
+ "RobertaPreLayerNormForMaskedLM",
+ "RobertaPreLayerNormForMultipleChoice",
+ "RobertaPreLayerNormForQuestionAnswering",
+ "RobertaPreLayerNormForSequenceClassification",
+ "RobertaPreLayerNormForTokenClassification",
+ "RobertaPreLayerNormModel",
+ "RobertaPreLayerNormPreTrainedModel"
+ ]
}
},
"info.art.roc-bert-zh": {
@@ -4424,7 +10836,19 @@
"0": {
"transformers": "RoCBertModel"
}
- }
+ },
+ "tasks": [
+ "RoCBertForCausalLM",
+ "RoCBertForMaskedLM",
+ "RoCBertForMultipleChoice",
+ "RoCBertForPreTraining",
+ "RoCBertForQuestionAnswering",
+ "RoCBertForSequenceClassification",
+ "RoCBertForTokenClassification",
+ "RoCBertLayer",
+ "RoCBertModel",
+ "RoCBertPreTrainedModel"
+ ]
}
},
"info.art.roformer-chinese": {
@@ -4434,7 +10858,18 @@
"0": {
"transformers": "RoFormerModel"
}
- }
+ },
+ "tasks": [
+ "RoFormerForCausalLM",
+ "RoFormerForMaskedLM",
+ "RoFormerForMultipleChoice",
+ "RoFormerForQuestionAnswering",
+ "RoFormerForSequenceClassification",
+ "RoFormerForTokenClassification",
+ "RoFormerLayer",
+ "RoFormerModel",
+ "RoFormerPreTrainedModel"
+ ]
}
},
"info.detr.rtdetr-r50vd": {
@@ -4444,7 +10879,12 @@
"0": {
"transformers": "RTDetrModel"
}
- }
+ },
+ "tasks": [
+ "RTDetrForObjectDetection",
+ "RTDetrModel",
+ "RTDetrPreTrainedModel"
+ ]
}
},
"info.detr.rtdetr-r18vd": {
@@ -4454,7 +10894,12 @@
"0": {
"transformers": "RTDetrV2Model"
}
- }
+ },
+ "tasks": [
+ "RTDetrV2Model",
+ "RTDetrV2PreTrainedModel",
+ "RTDetrV2ForObjectDetection"
+ ]
}
},
"info.rnn.rwkv-4-pile": {
@@ -4464,7 +10909,12 @@
"0": {
"transformers": "RwkvModel"
}
- }
+ },
+ "tasks": [
+ "RwkvForCausalLM",
+ "RwkvModel",
+ "RwkvPreTrainedModel"
+ ]
}
},
"info.vit.sam-vit-huge": {
@@ -4474,7 +10924,12 @@
"0": {
"transformers": "SamModel"
}
- }
+ },
+ "tasks": [
+ "SamVisionModel",
+ "SamModel",
+ "SamPreTrainedModel"
+ ]
}
},
"info.vit.sam2-hiera": {
@@ -4484,7 +10939,13 @@
"0": {
"transformers": "Sam2Model"
}
- }
+ },
+ "tasks": [
+ "Sam2Model",
+ "Sam2VisionModel",
+ "Sam2PreTrainedModel",
+ "Sam2HieraDetModel"
+ ]
}
},
"info.vit.sam3": {
@@ -4494,7 +10955,13 @@
"0": {
"transformers": "Sam3Model"
}
- }
+ },
+ "tasks": [
+ "Sam3Model",
+ "Sam3VisionModel",
+ "Sam3ViTModel",
+ "Sam3PreTrainedModel"
+ ]
}
},
"info.vit.sam3-tracker1-hiera": {
@@ -4504,7 +10971,11 @@
"0": {
"transformers": "Sam3TrackerModel"
}
- }
+ },
+ "tasks": [
+ "Sam3TrackerModel",
+ "Sam3TrackerPreTrainedModel"
+ ]
}
},
"info.stst.sam3": {
@@ -4514,7 +10985,13 @@
"0": {
"transformers": "Sam3VideoModel"
}
- }
+ },
+ "tasks": [
+ "Sam3VideoModel",
+ "Sam3VideoPreTrainedModel",
+ "Sam3VideoInferenceSession",
+ "Sam3VideoSegmentationOutput"
+ ]
}
},
"info.vit.sam-hq-vit-h": {
@@ -4524,7 +11001,12 @@
"0": {
"transformers": "SamHQModel"
}
- }
+ },
+ "tasks": [
+ "SamHQModel",
+ "SamHQPreTrainedModel",
+ "SamHQVisionModel"
+ ]
}
},
"info.vit.sam-hq-vit-huge": {
@@ -4534,7 +11016,12 @@
"0": {
"transformers": "SamHQVisionModel"
}
- }
+ },
+ "tasks": [
+ "SamHQModel",
+ "SamHQPreTrainedModel",
+ "SamHQVisionModel"
+ ]
}
},
"info.aet.hf-seamless-m4t": {
@@ -4544,7 +11031,19 @@
"0": {
"transformers": "SeamlessM4TModel"
}
- }
+ },
+ "tasks": [
+ "SeamlessM4TForTextToSpeech",
+ "SeamlessM4TForSpeechToSpeech",
+ "SeamlessM4TForTextToText",
+ "SeamlessM4TForSpeechToText",
+ "SeamlessM4TModel",
+ "SeamlessM4TPreTrainedModel",
+ "SeamlessM4TCodeHifiGan",
+ "SeamlessM4THifiGan",
+ "SeamlessM4TTextToUnitForConditionalGeneration",
+ "SeamlessM4TTextToUnitModel"
+ ]
}
},
"info.stst.seamless-m4t-v2": {
@@ -4554,7 +11053,33 @@
"0": {
"transformers": "SeamlessM4Tv2Model"
}
- }
+ },
+ "tasks": [
+ "SeamlessM4Tv2ForTextToSpeech",
+ "SeamlessM4Tv2ForSpeechToSpeech",
+ "SeamlessM4Tv2ForTextToText",
+ "SeamlessM4Tv2ForSpeechToText",
+ "SeamlessM4Tv2Model",
+ "SeamlessM4Tv2PreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.seedoss": {
+ "*": {
+ "repo": "ByteDance-Seed/SeedOss-36B",
+ "pkg": {
+ "0": {
+ "transformers": "SeedOssModel"
+ }
+ },
+ "tasks": [
+ "SeedOssForCausalLM",
+ "SeedOssForQuestionAnswering",
+ "SeedOssPreTrainedModel",
+ "SeedOssModel",
+ "SeedOssForSequenceClassification",
+ "SeedOssForTokenClassification"
+ ]
}
},
"info.vit.segformer-b0-finetuned-ade-512-512": {
@@ -4564,7 +11089,15 @@
"0": {
"transformers": "SegformerModel"
}
- }
+ },
+ "tasks": [
+ "SegformerDecodeHead",
+ "SegformerForImageClassification",
+ "SegformerForSemanticSegmentation",
+ "SegformerLayer",
+ "SegformerModel",
+ "SegformerPreTrainedModel"
+ ]
}
},
"info.vit.seggpt-vit": {
@@ -4574,7 +11107,12 @@
"0": {
"transformers": "SegGptModel"
}
- }
+ },
+ "tasks": [
+ "SegGptModel",
+ "SegGptPreTrainedModel",
+ "SegGptForImageSegmentation"
+ ]
}
},
"info.aet.sew": {
@@ -4584,7 +11122,13 @@
"0": {
"transformers": "SEWModel"
}
- }
+ },
+ "tasks": [
+ "SEWForCTC",
+ "SEWForSequenceClassification",
+ "SEWModel",
+ "SEWPreTrainedModel"
+ ]
}
},
"info.aet.sew-d": {
@@ -4594,7 +11138,13 @@
"0": {
"transformers": "SEWDModel"
}
- }
+ },
+ "tasks": [
+ "SEWDForCTC",
+ "SEWDForSequenceClassification",
+ "SEWDModel",
+ "SEWDPreTrainedModel"
+ ]
}
},
"info.vit.siglip2-patch16-224": {
@@ -4604,7 +11154,14 @@
"0": {
"transformers": "Siglip2Model"
}
- }
+ },
+ "tasks": [
+ "Siglip2Model",
+ "Siglip2PreTrainedModel",
+ "Siglip2TextModel",
+ "Siglip2VisionModel",
+ "Siglip2ForImageClassification"
+ ]
}
},
"info.vit.siglip2-patch16-naflex": {
@@ -4614,7 +11171,32 @@
"0": {
"transformers": "Siglip2VisionModel"
}
- }
+ },
+ "tasks": [
+ "Siglip2Model",
+ "Siglip2PreTrainedModel",
+ "Siglip2TextModel",
+ "Siglip2VisionModel",
+ "Siglip2ForImageClassification"
+ ]
+ }
+ },
+ "info.stst.smollm3": {
+ "*": {
+ "repo": "HuggingFaceTB/SmolLM3-3B",
+ "pkg": {
+ "0": {
+ "transformers": "SmolLM3Model"
+ }
+ },
+ "tasks": [
+ "SmolLM3PreTrainedModel",
+ "SmolLM3Model",
+ "SmolLM3ForCausalLM",
+ "SmolLM3ForSequenceClassification",
+ "SmolLM3ForTokenClassification",
+ "SmolLM3ForQuestionAnswering"
+ ]
}
},
"info.vit.smolvlm": {
@@ -4624,7 +11206,13 @@
"0": {
"transformers": "SmolVLMModel"
}
- }
+ },
+ "tasks": [
+ "SmolVLMForConditionalGeneration",
+ "SmolVLMPreTrainedModel",
+ "SmolVLMModel",
+ "SmolVLMVisionTransformer"
+ ]
}
},
"info.vit.siglip-so-patch14-384": {
@@ -4634,7 +11222,13 @@
"0": {
"transformers": "SmolVLMVisionTransformer"
}
- }
+ },
+ "tasks": [
+ "SmolVLMForConditionalGeneration",
+ "SmolVLMPreTrainedModel",
+ "SmolVLMModel",
+ "SmolVLMVisionTransformer"
+ ]
}
},
"info.aet.s2t-librispeech-asr": {
@@ -4644,7 +11238,12 @@
"0": {
"transformers": "Speech2TextModel"
}
- }
+ },
+ "tasks": [
+ "Speech2TextForConditionalGeneration",
+ "Speech2TextModel",
+ "Speech2TextPreTrainedModel"
+ ]
}
},
"info.stst.speecht5-asr": {
@@ -4654,7 +11253,15 @@
"0": {
"transformers": "SpeechT5Model"
}
- }
+ },
+ "tasks": [
+ "SpeechT5ForSpeechToText",
+ "SpeechT5ForSpeechToSpeech",
+ "SpeechT5ForTextToSpeech",
+ "SpeechT5Model",
+ "SpeechT5PreTrainedModel",
+ "SpeechT5HifiGan"
+ ]
}
},
"info.art.splinter": {
@@ -4664,17 +11271,68 @@
"0": {
"transformers": "SplinterModel"
}
- }
+ },
+ "tasks": [
+ "SplinterForQuestionAnswering",
+ "SplinterForPreTraining",
+ "SplinterLayer",
+ "SplinterModel",
+ "SplinterPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.squeezebert-uncased": {
+ "*": {
+ "repo": "squeezebert/squeezebert-uncased",
+ "pkg": {
+ "0": {
+ "transformers": "SqueezeBertModel"
+ }
+ },
+ "tasks": [
+ "SqueezeBertForMaskedLM",
+ "SqueezeBertForMultipleChoice",
+ "SqueezeBertForQuestionAnswering",
+ "SqueezeBertForSequenceClassification",
+ "SqueezeBertForTokenClassification",
+ "SqueezeBertModel",
+ "SqueezeBertModule",
+ "SqueezeBertPreTrainedModel"
+ ]
+ }
+ },
+ "info.stst.stablelm-4e1t": {
+ "*": {
+ "repo": "stabilityai/stablelm-3b-4e1t",
+ "pkg": {
+ "0": {
+ "transformers": "StableLmModel"
+ }
+ },
+ "tasks": [
+ "StableLmForCausalLM",
+ "StableLmModel",
+ "StableLmPreTrainedModel",
+ "StableLmForSequenceClassification",
+ "StableLmForTokenClassification"
+ ]
}
},
- "info.art.squeezebert-uncased": {
+ "info.stst.starcoder2": {
"*": {
- "repo": "squeezebert/squeezebert-uncased",
+ "repo": "bigcode/starcoder2-7b",
"pkg": {
"0": {
- "transformers": "SqueezeBertModel"
+ "transformers": "Starcoder2Model"
}
- }
+ },
+ "tasks": [
+ "Starcoder2ForCausalLM",
+ "Starcoder2Model",
+ "Starcoder2PreTrainedModel",
+ "Starcoder2ForSequenceClassification",
+ "Starcoder2ForTokenClassification"
+ ]
}
},
"info.vit.swiftformer-xs": {
@@ -4684,7 +11342,12 @@
"0": {
"transformers": "SwiftFormerModel"
}
- }
+ },
+ "tasks": [
+ "SwiftFormerForImageClassification",
+ "SwiftFormerModel",
+ "SwiftFormerPreTrainedModel"
+ ]
}
},
"info.vit.swin2sr-classicalsr-x2-64": {
@@ -4694,7 +11357,12 @@
"0": {
"transformers": "Swin2SRModel"
}
- }
+ },
+ "tasks": [
+ "Swin2SRForImageSuperResolution",
+ "Swin2SRModel",
+ "Swin2SRPreTrainedModel"
+ ]
}
},
"info.vit.swinv2-patch4-window8-256": {
@@ -4704,17 +11372,32 @@
"0": {
"transformers": "Swinv2Model"
}
- }
+ },
+ "tasks": [
+ "Swinv2ForImageClassification",
+ "Swinv2ForMaskedImageModeling",
+ "Swinv2Model",
+ "Swinv2PreTrainedModel",
+ "Swinv2Backbone"
+ ]
}
},
- "info.stst.switch-8": {
+ "info.moe.switch-8": {
"*": {
"repo": "google/switch-base-8",
"pkg": {
"0": {
"transformers": "SwitchTransformersModel"
}
- }
+ },
+ "tasks": [
+ "SwitchTransformersEncoderModel",
+ "SwitchTransformersForConditionalGeneration",
+ "SwitchTransformersModel",
+ "SwitchTransformersPreTrainedModel",
+ "SwitchTransformersTop1Router",
+ "SwitchTransformersSparseMLP"
+ ]
}
},
"info.stst.t5": {
@@ -4724,7 +11407,151 @@
"0": {
"transformers": "T5Model"
}
- }
+ },
+ "identifiers": [
+ [
+ 4096
+ ],
+ "encoder.embed_tokens.weight",
+ "text_encoders.t5xxl.transformer.shared.weight",
+ "t5xxl",
+ "encoder.block.0.layer.1.DenseReluDense.wi.weight"
+ ],
+ "file_256": [
+ "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
+ "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
+ "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
+ "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
+ "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
+ "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
+ "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
+ "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
+ "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
+ "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
+ "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
+ "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
+ "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
+ "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
+ "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
+ "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
+ "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
+ "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
+ "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
+ "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
+ "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
+ "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
+ "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
+ "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
+ "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
+ "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
+ "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
+ ],
+ "layer_b3": [
+ "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
+ "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
+ "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
+ "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
+ "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
+ "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
+ "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
+ "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
+ "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
+ "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
+ "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
+ "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
+ "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
+ "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
+ "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
+ "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
+ "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
+ "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
+ "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
+ "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
+ "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
+ "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
+ "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
+ "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
+ "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
+ "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
+ "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
+ "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
+ "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
+ "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
+ ],
+ "layer_256": [
+ "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
+ "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
+ "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
+ "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
+ "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
+ "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
+ "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
+ "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
+ "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
+ "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
+ "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
+ "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
+ "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
+ "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
+ "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
+ "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
+ "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
+ "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
+ "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
+ "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
+ "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
+ "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
+ "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
+ "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
+ "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
+ "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
+ "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
+ "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
+ ],
+ "tasks": [
+ "T5EncoderModel",
+ "T5ForConditionalGeneration",
+ "T5Model",
+ "T5PreTrainedModel",
+ "T5ForQuestionAnswering",
+ "T5ForSequenceClassification",
+ "T5ForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.t5gemma-prefixlm": {
+ "*": {
+ "repo": "google/t5gemma-2b-2b-prefixlm-it",
+ "pkg": {
+ "0": {
+ "transformers": "T5GemmaModel"
+ }
+ },
+ "tasks": [
+ "T5GemmaForConditionalGeneration",
+ "T5GemmaModel",
+ "T5GemmaEncoderModel",
+ "T5GemmaPreTrainedModel",
+ "T5GemmaForSequenceClassification",
+ "T5GemmaForTokenClassification"
+ ]
+ }
+ },
+ "info.stst.t5gemma-2": {
+ "*": {
+ "repo": "google/t5gemma-2-270m-270m",
+ "pkg": {
+ "0": {
+ "transformers": "T5Gemma2Model"
+ }
+ },
+ "tasks": [
+ "T5Gemma2ForConditionalGeneration",
+ "T5Gemma2Model",
+ "T5Gemma2PreTrainedModel",
+ "T5Gemma2ForSequenceClassification",
+ "T5Gemma2ForTokenClassification"
+ ]
}
},
"info.detr.table-transformer-detection": {
@@ -4734,7 +11561,12 @@
"0": {
"transformers": "TableTransformerModel"
}
- }
+ },
+ "tasks": [
+ "TableTransformerForObjectDetection",
+ "TableTransformerModel",
+ "TableTransformerPreTrainedModel"
+ ]
}
},
"info.art.tapas-finetuned-sqa": {
@@ -4744,7 +11576,14 @@
"0": {
"transformers": "TapasModel"
}
- }
+ },
+ "tasks": [
+ "TapasForMaskedLM",
+ "TapasForQuestionAnswering",
+ "TapasForSequenceClassification",
+ "TapasModel",
+ "TapasPreTrainedModel"
+ ]
}
},
"info.vit.textnet": {
@@ -4754,7 +11593,43 @@
"0": {
"transformers": "TextNetModel"
}
- }
+ },
+ "tasks": [
+ "TextNetBackbone",
+ "TextNetModel",
+ "TextNetPreTrainedModel",
+ "TextNetForImageClassification"
+ ]
+ }
+ },
+ "info.stst.time-series-transformer-tourism-monthly": {
+ "*": {
+ "repo": "huggingface/time-series-transformer-tourism-monthly",
+ "pkg": {
+ "0": {
+ "transformers": "TimeSeriesTransformerModel"
+ }
+ },
+ "tasks": [
+ "TimeSeriesTransformerForPrediction",
+ "TimeSeriesTransformerModel",
+ "TimeSeriesTransformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.art.timesfm-2-pytorch": {
+ "*": {
+ "repo": "google/timesfm-2.0-500m-pytorch",
+ "pkg": {
+ "0": {
+ "transformers": "TimesFmModel"
+ }
+ },
+ "tasks": [
+ "TimesFmModelForPrediction",
+ "TimesFmPreTrainedModel",
+ "TimesFmModel"
+ ]
}
},
"info.vit.timesformer-finetuned-k600": {
@@ -4764,7 +11639,27 @@
"0": {
"transformers": "TimesformerModel"
}
- }
+ },
+ "tasks": [
+ "TimesformerModel",
+ "TimesformerForVideoClassification",
+ "TimesformerPreTrainedModel"
+ ]
+ }
+ },
+ "info.detr.resnet18-a1-in": {
+ "*": {
+ "repo": "timm/resnet18.a1_in1k",
+ "pkg": {
+ "0": {
+ "transformers": "TimmWrapperModel"
+ }
+ },
+ "tasks": [
+ "TimmWrapperPreTrainedModel",
+ "TimmWrapperModel",
+ "TimmWrapperForImageClassification"
+ ]
}
},
"info.detr.tvp": {
@@ -4774,7 +11669,12 @@
"0": {
"transformers": "TvpModel"
}
- }
+ },
+ "tasks": [
+ "TvpModel",
+ "TvpPreTrainedModel",
+ "TvpForVideoGrounding"
+ ]
}
},
"info.vit.udop": {
@@ -4784,7 +11684,13 @@
"0": {
"transformers": "UdopModel"
}
- }
+ },
+ "tasks": [
+ "UdopForConditionalGeneration",
+ "UdopPreTrainedModel",
+ "UdopModel",
+ "UdopEncoderModel"
+ ]
}
},
"info.stst.umt5": {
@@ -4794,7 +11700,77 @@
"0": {
"transformers": "UMT5Model"
}
- }
+ },
+ "identifiers": [
+ "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
+ ],
+ "file_256": [
+ "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
+ "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
+ "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
+ "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
+ "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
+ "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
+ "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
+ "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
+ "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
+ "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
+ "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
+ "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
+ "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
+ "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
+ "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
+ "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
+ ],
+ "layer_b3": [
+ "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
+ "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
+ "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
+ "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
+ "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
+ "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
+ "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
+ "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
+ "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
+ "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
+ "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
+ "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
+ "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
+ "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
+ "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
+ "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
+ "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
+ "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
+ ],
+ "layer_256": [
+ "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
+ "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
+ "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
+ "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
+ "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
+ "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
+ "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
+ "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
+ "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
+ "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
+ "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
+ "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
+ "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
+ "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
+ "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
+ "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
+ "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
+ "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
+ ],
+ "tasks": [
+ "UMT5EncoderModel",
+ "UMT5ForConditionalGeneration",
+ "UMT5ForQuestionAnswering",
+ "UMT5ForSequenceClassification",
+ "UMT5ForTokenClassification",
+ "UMT5Model",
+ "UMT5PreTrainedModel"
+ ]
}
},
"info.aet.unispeech-1500h-cv": {
@@ -4804,7 +11780,14 @@
"0": {
"transformers": "UniSpeechModel"
}
- }
+ },
+ "tasks": [
+ "UniSpeechForCTC",
+ "UniSpeechForPreTraining",
+ "UniSpeechForSequenceClassification",
+ "UniSpeechModel",
+ "UniSpeechPreTrainedModel"
+ ]
}
},
"info.aet.unispeech-sat-100h-libri-ft": {
@@ -4814,7 +11797,16 @@
"0": {
"transformers": "UniSpeechSatModel"
}
- }
+ },
+ "tasks": [
+ "UniSpeechSatForAudioFrameClassification",
+ "UniSpeechSatForCTC",
+ "UniSpeechSatForPreTraining",
+ "UniSpeechSatForSequenceClassification",
+ "UniSpeechSatForXVector",
+ "UniSpeechSatModel",
+ "UniSpeechSatPreTrainedModel"
+ ]
}
},
"info.gan.univnet-dev": {
@@ -4824,7 +11816,25 @@
"0": {
"transformers": "UnivNetModel"
}
- }
+ },
+ "tasks": [
+ "UnivNetModel"
+ ]
+ }
+ },
+ "info.stst.vaultgemma": {
+ "*": {
+ "repo": "google/vaultgemma-7b",
+ "pkg": {
+ "0": {
+ "transformers": "VaultGemmaModel"
+ }
+ },
+ "tasks": [
+ "VaultGemmaForCausalLM",
+ "VaultGemmaModel",
+ "VaultGemmaPreTrainedModel"
+ ]
}
},
"info.vit.videollama3-image-hf": {
@@ -4834,7 +11844,13 @@
"0": {
"transformers": "VideoLlama3Model"
}
- }
+ },
+ "tasks": [
+ "VideoLlama3VisionModel",
+ "VideoLlama3PreTrainedModel",
+ "VideoLlama3Model",
+ "VideoLlama3ForConditionalGeneration"
+ ]
}
},
"info.vit.video-llava-hf": {
@@ -4844,7 +11860,12 @@
"0": {
"transformers": "VideoLlavaModel"
}
- }
+ },
+ "tasks": [
+ "VideoLlavaPreTrainedModel",
+ "VideoLlavaModel",
+ "VideoLlavaForConditionalGeneration"
+ ]
}
},
"info.vit.videomae": {
@@ -4854,7 +11875,13 @@
"0": {
"transformers": "VideoMAEModel"
}
- }
+ },
+ "tasks": [
+ "VideoMAEForPreTraining",
+ "VideoMAEModel",
+ "VideoMAEPreTrainedModel",
+ "VideoMAEForVideoClassification"
+ ]
}
},
"info.vit.vilt-b32-mlm": {
@@ -4864,7 +11891,17 @@
"0": {
"transformers": "ViltModel"
}
- }
+ },
+ "tasks": [
+ "ViltForImageAndTextRetrieval",
+ "ViltForImagesAndTextClassification",
+ "ViltForTokenClassification",
+ "ViltForMaskedLM",
+ "ViltForQuestionAnswering",
+ "ViltLayer",
+ "ViltModel",
+ "ViltPreTrainedModel"
+ ]
}
},
"info.vit.vip-llava-hf": {
@@ -4874,7 +11911,12 @@
"0": {
"transformers": "VipLlavaModel"
}
- }
+ },
+ "tasks": [
+ "VipLlavaModel",
+ "VipLlavaForConditionalGeneration",
+ "VipLlavaPreTrainedModel"
+ ]
}
},
"info.vit.japanese-clip-vit-h-14-bert-wider": {
@@ -4884,7 +11926,10 @@
"0": {
"transformers": "VisionTextDualEncoderModel"
}
- }
+ },
+ "tasks": [
+ "VisionTextDualEncoderModel"
+ ]
}
},
"info.art.visualbert-vqa-coco-pre": {
@@ -4894,7 +11939,17 @@
"0": {
"transformers": "VisualBertModel"
}
- }
+ },
+ "tasks": [
+ "VisualBertForMultipleChoice",
+ "VisualBertForPreTraining",
+ "VisualBertForQuestionAnswering",
+ "VisualBertForRegionToPhraseAlignment",
+ "VisualBertForVisualReasoning",
+ "VisualBertLayer",
+ "VisualBertModel",
+ "VisualBertPreTrainedModel"
+ ]
}
},
"info.vit.vit-patch16-224": {
@@ -4904,7 +11959,13 @@
"0": {
"transformers": "ViTModel"
}
- }
+ },
+ "tasks": [
+ "ViTForImageClassification",
+ "ViTForMaskedImageModeling",
+ "ViTModel",
+ "ViTPreTrainedModel"
+ ]
}
},
"info.vit.vit-mae": {
@@ -4914,7 +11975,13 @@
"0": {
"transformers": "ViTMAEModel"
}
- }
+ },
+ "tasks": [
+ "ViTMAEForPreTraining",
+ "ViTMAELayer",
+ "ViTMAEModel",
+ "ViTMAEPreTrainedModel"
+ ]
}
},
"info.vit.vit-msn": {
@@ -4924,7 +11991,12 @@
"0": {
"transformers": "ViTMSNModel"
}
- }
+ },
+ "tasks": [
+ "ViTMSNModel",
+ "ViTMSNForImageClassification",
+ "ViTMSNPreTrainedModel"
+ ]
}
},
"info.vit.vitdet-patch16-224": {
@@ -4934,7 +12006,12 @@
"0": {
"transformers": "VitDetModel"
}
- }
+ },
+ "tasks": [
+ "VitDetModel",
+ "VitDetPreTrainedModel",
+ "VitDetBackbone"
+ ]
}
},
"info.art.mms-tts-eng": {
@@ -4944,7 +12021,11 @@
"0": {
"transformers": "VitsModel"
}
- }
+ },
+ "tasks": [
+ "VitsModel",
+ "VitsPreTrainedModel"
+ ]
}
},
"info.vit.vivit16x2-kinetics400": {
@@ -4954,7 +12035,12 @@
"0": {
"transformers": "VivitModel"
}
- }
+ },
+ "tasks": [
+ "VivitModel",
+ "VivitPreTrainedModel",
+ "VivitForVideoClassification"
+ ]
}
},
"info.vit.vjepa2-vitl-fpc64-256": {
@@ -4964,7 +12050,12 @@
"0": {
"transformers": "VJEPA2Model"
}
- }
+ },
+ "tasks": [
+ "VJEPA2Model",
+ "VJEPA2PreTrainedModel",
+ "VJEPA2ForVideoClassification"
+ ]
}
},
"info.stst.voxtral-2507": {
@@ -4974,7 +12065,12 @@
"0": {
"transformers": "VoxtralForConditionalGeneration"
}
- }
+ },
+ "tasks": [
+ "VoxtralPreTrainedModel",
+ "VoxtralEncoder",
+ "VoxtralForConditionalGeneration"
+ ]
}
},
"info.aet.voxtral-2507": {
@@ -4984,7 +12080,12 @@
"0": {
"transformers": "VoxtralEncoder"
}
- }
+ },
+ "tasks": [
+ "VoxtralPreTrainedModel",
+ "VoxtralEncoder",
+ "VoxtralForConditionalGeneration"
+ ]
}
},
"info.aet.wav2vec2-960h": {
@@ -4994,7 +12095,17 @@
"0": {
"transformers": "Wav2Vec2Model"
}
- }
+ },
+ "tasks": [
+ "Wav2Vec2ForAudioFrameClassification",
+ "Wav2Vec2ForCTC",
+ "Wav2Vec2ForMaskedLM",
+ "Wav2Vec2ForPreTraining",
+ "Wav2Vec2ForSequenceClassification",
+ "Wav2Vec2ForXVector",
+ "Wav2Vec2Model",
+ "Wav2Vec2PreTrainedModel"
+ ]
}
},
"info.aet.wav2vec2-bert-rel-pos": {
@@ -5004,7 +12115,15 @@
"0": {
"transformers": "Wav2Vec2BertModel"
}
- }
+ },
+ "tasks": [
+ "Wav2Vec2BertForAudioFrameClassification",
+ "Wav2Vec2BertForCTC",
+ "Wav2Vec2BertForSequenceClassification",
+ "Wav2Vec2BertForXVector",
+ "Wav2Vec2BertModel",
+ "Wav2Vec2BertPreTrainedModel"
+ ]
}
},
"info.aet.wav2vec2-conformer-rel-pos": {
@@ -5014,7 +12133,16 @@
"0": {
"transformers": "Wav2Vec2ConformerModel"
}
- }
+ },
+ "tasks": [
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel"
+ ]
}
},
"info.aet.wavlm": {
@@ -5024,7 +12152,15 @@
"0": {
"transformers": "WavLMModel"
}
- }
+ },
+ "tasks": [
+ "WavLMForAudioFrameClassification",
+ "WavLMForCTC",
+ "WavLMForSequenceClassification",
+ "WavLMForXVector",
+ "WavLMModel",
+ "WavLMPreTrainedModel"
+ ]
}
},
"info.aet.whisper": {
@@ -5034,7 +12170,14 @@
"0": {
"transformers": "WhisperModel"
}
- }
+ },
+ "tasks": [
+ "WhisperForCausalLM",
+ "WhisperForConditionalGeneration",
+ "WhisperModel",
+ "WhisperPreTrainedModel",
+ "WhisperForAudioClassification"
+ ]
}
},
"info.vit.xclip-patch32": {
@@ -5044,7 +12187,27 @@
"0": {
"transformers": "XCLIPModel"
}
- }
+ },
+ "tasks": [
+ "XCLIPModel",
+ "XCLIPPreTrainedModel",
+ "XCLIPTextModel",
+ "XCLIPVisionModel"
+ ]
+ }
+ },
+ "info.gan.x-codec": {
+ "*": {
+ "repo": "Manel/X-Codec",
+ "pkg": {
+ "0": {
+ "transformers": "XcodecModel"
+ }
+ },
+ "tasks": [
+ "XcodecModel",
+ "XcodecPreTrainedModel"
+ ]
}
},
"info.art.xglm": {
@@ -5054,7 +12217,12 @@
"0": {
"transformers": "XGLMModel"
}
- }
+ },
+ "tasks": [
+ "XGLMForCausalLM",
+ "XGLMModel",
+ "XGLMPreTrainedModel"
+ ]
}
},
"info.art.xlm-mlm-en-2048": {
@@ -5064,7 +12232,17 @@
"0": {
"transformers": "XLMModel"
}
- }
+ },
+ "tasks": [
+ "XLMForMultipleChoice",
+ "XLMForQuestionAnswering",
+ "XLMForQuestionAnsweringSimple",
+ "XLMForSequenceClassification",
+ "XLMForTokenClassification",
+ "XLMModel",
+ "XLMPreTrainedModel",
+ "XLMWithLMHeadModel"
+ ]
}
},
"info.art.xlm-roberta": {
@@ -5074,7 +12252,17 @@
"0": {
"transformers": "XLMRobertaModel"
}
- }
+ },
+ "tasks": [
+ "XLMRobertaForCausalLM",
+ "XLMRobertaForMaskedLM",
+ "XLMRobertaForMultipleChoice",
+ "XLMRobertaForQuestionAnswering",
+ "XLMRobertaForSequenceClassification",
+ "XLMRobertaForTokenClassification",
+ "XLMRobertaModel",
+ "XLMRobertaPreTrainedModel"
+ ]
}
},
"info.art.xlm-roberta-xl": {
@@ -5084,7 +12272,17 @@
"0": {
"transformers": "XLMRobertaXLModel"
}
- }
+ },
+ "tasks": [
+ "XLMRobertaXLForCausalLM",
+ "XLMRobertaXLForMaskedLM",
+ "XLMRobertaXLForMultipleChoice",
+ "XLMRobertaXLForQuestionAnswering",
+ "XLMRobertaXLForSequenceClassification",
+ "XLMRobertaXLForTokenClassification",
+ "XLMRobertaXLModel",
+ "XLMRobertaXLPreTrainedModel"
+ ]
}
},
"info.art.xlnet-cased": {
@@ -5094,7 +12292,17 @@
"0": {
"transformers": "XLNetModel"
}
- }
+ },
+ "tasks": [
+ "XLNetForMultipleChoice",
+ "XLNetForQuestionAnswering",
+ "XLNetForQuestionAnsweringSimple",
+ "XLNetForSequenceClassification",
+ "XLNetForTokenClassification",
+ "XLNetLMHeadModel",
+ "XLNetModel",
+ "XLNetPreTrainedModel"
+ ]
}
},
"info.lstm.xlstm": {
@@ -5104,7 +12312,12 @@
"0": {
"transformers": "xLSTMModel"
}
- }
+ },
+ "tasks": [
+ "xLSTMForCausalLM",
+ "xLSTMModel",
+ "xLSTMPreTrainedModel"
+ ]
}
},
"info.art.xmod": {
@@ -5114,7 +12327,17 @@
"0": {
"transformers": "XmodModel"
}
- }
+ },
+ "tasks": [
+ "XmodForCausalLM",
+ "XmodForMaskedLM",
+ "XmodForMultipleChoice",
+ "XmodForQuestionAnswering",
+ "XmodForSequenceClassification",
+ "XmodForTokenClassification",
+ "XmodModel",
+ "XmodPreTrainedModel"
+ ]
}
},
"info.cnn.yolos": {
@@ -5124,7 +12347,12 @@
"0": {
"transformers": "YolosModel"
}
- }
+ },
+ "tasks": [
+ "YolosForObjectDetection",
+ "YolosModel",
+ "YolosPreTrainedModel"
+ ]
}
},
"info.art.yoso-4096": {
@@ -5134,7 +12362,17 @@
"0": {
"transformers": "YosoModel"
}
- }
+ },
+ "tasks": [
+ "YosoForMaskedLM",
+ "YosoForMultipleChoice",
+ "YosoForQuestionAnswering",
+ "YosoForSequenceClassification",
+ "YosoForTokenClassification",
+ "YosoLayer",
+ "YosoModel",
+ "YosoPreTrainedModel"
+ ]
}
},
"info.ssm.zamba-v1": {
@@ -5144,7 +12382,29 @@
"0": {
"transformers": "ZambaModel"
}
- }
+ },
+ "tasks": [
+ "ZambaForCausalLM",
+ "ZambaForSequenceClassification",
+ "ZambaModel",
+ "ZambaPreTrainedModel"
+ ]
+ }
+ },
+ "info.ssm.zamba2": {
+ "*": {
+ "repo": "Zyphra/Zamba2-2.7B",
+ "pkg": {
+ "0": {
+ "transformers": "Zamba2Model"
+ }
+ },
+ "tasks": [
+ "Zamba2ForCausalLM",
+ "Zamba2ForSequenceClassification",
+ "Zamba2Model",
+ "Zamba2PreTrainedModel"
+ ]
}
},
"ops.precision.uint": {
@@ -6791,6 +14051,15 @@
],
"layer_b3": [
"6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
+ ],
+ "tasks": [
+ "Wav2Vec2ConformerForAudioFrameClassification",
+ "Wav2Vec2ConformerForCTC",
+ "Wav2Vec2ConformerForPreTraining",
+ "Wav2Vec2ConformerForSequenceClassification",
+ "Wav2Vec2ConformerForXVector",
+ "Wav2Vec2ConformerModel",
+ "Wav2Vec2ConformerPreTrainedModel"
]
}
},
@@ -6949,6 +14218,62 @@
"layer_b3": [
"cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
]
+ },
+ "*": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 3.5,
+ "num_inference_steps": 50,
+ "max_sequence_length": 512
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "gudance": 3.5,
+ "num_inference_steps": 25
+ }
+ }
+ },
+ "file_256": [
+ "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
+ "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
+ "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
+ "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
+ "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
+ ],
+ "layer_b3": [
+ "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
+ "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
+ "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
+ ],
+ "layer_256": [
+ "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
+ "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
+ "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
+ ],
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ],
+ "tasks": [
+ "Image",
+ "Redux",
+ "Kontext",
+ "Depth",
+ "Fill",
+ "ConceptAttention",
+ "ControlNet",
+ "CavTon",
+ "IC-Edit"
+ ]
}
},
"info.dit.wan2-flf2v-720p": {
@@ -7103,6 +14428,15 @@
"3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
"22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
"f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
]
}
},
@@ -7143,6 +14477,15 @@
"f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
"7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
"b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
]
}
},
@@ -7168,6 +14511,15 @@
"227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
"3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
"ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
+ ],
+ "tasks": [
+ "CLIPModel",
+ "CLIPPreTrainedModel",
+ "CLIPTextModel",
+ "CLIPTextModelWithProjection",
+ "CLIPVisionModel",
+ "CLIPVisionModelWithProjection",
+ "CLIPForImageClassification"
]
}
},
diff --git a/mir/spec/missing_params.json b/mir/spec/missing_params.json
index de3dc44..c3aebdc 100644
--- a/mir/spec/missing_params.json
+++ b/mir/spec/missing_params.json
@@ -53,8 +53,19 @@
"timm_backbone": {
"repo_path": "microsoft/resnet-50"
},
+ "gpt_oss": {
+ "repo_path": "openai/gpt-oss-120b"
+ },
+ "bert": {
+ "repo_path": "google-bert/bert-base-uncased"
+ },
"timm_wrapper": {
- "repo_path": "timm/resnet18.a1_in1k"
+ "repo_path": "timm/resnet18.a1_in1k",
+ "params": {
+ "_resnet_": [
+ ""
+ ]
+ }
},
"vision-text-dual-encoder": {
"repo_path": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
diff --git a/mir/spec/template.json b/mir/spec/template.json
index 8479db5..66e5fc3 100644
--- a/mir/spec/template.json
+++ b/mir/spec/template.json
@@ -18,6 +18,9 @@
"resnet": ""
},
"transformer": {
+ "mlp": [
+ "prediction_channel_indices"
+ ],
"lstm": [
"sequence_kernel"
],
@@ -69,7 +72,9 @@
"fusion_hidden_size"
],
"moe": [
- "num_experts_per_tok"
+ "num_experts_per_tok",
+ "num_experts",
+ "moe_intermediate_size"
],
"aet": [
"classifier_proj_size",
@@ -97,6 +102,8 @@
"router_ignore_padding_tokens",
"d_ff",
"d_kv",
+ "vocoder_config",
+ "prompt_length",
"audio_config",
"convolution_bias",
"rope_parameters",
diff --git a/mir/tag.py b/mir/tag.py
index fc95b7a..3c1fec4 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -94,12 +94,12 @@ def tag_base_model(repo_path: str, class_name: str, addendum: dict | None = None
:param class_name: The HF transformers class for the model
:return: A segmented MIR tag useful for appending index entries"""
- from mir.config.constants import extract_init_params
+ from mir.config.constants import extract_init_parameters
- annotations = extract_init_params(class_name.replace("Model", "Config"), "transformers")
+ annotations = extract_init_parameters(class_name.replace("Model", "Config"), "transformers")
if not annotations:
class_name = class_name.replace("Config", "Model")
- annotations = extract_init_params(class_name, "transformers")
+ annotations = extract_init_parameters(class_name, "transformers")
if not annotations:
raise TypeError("No mode type returned")
mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
From 6b5c95e2581caac8569633aff80a29ad33d5f8c4 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Mon, 12 Jan 2026 21:58:49 -0500
Subject: [PATCH 05/16] ~fast and danger2
---
data/__init__.py | 19 +
data/diffusers_adds.json | 890 +
data/exclusions.json | 32 +
data/migrations.json | 58 +
data/mir.json | 3 +
mir/spec/template.json => data/nn_filter.json | 0
data/parameters.json | 30 +
data/prefixes.json | 34 +
mir/spec/modes.json => data/tag_scrape.json | 25 -
data/transformers_adds.json | 332 +
mir/__init__.py | 42 +-
mir/__main__.py | 68 -
mir/config/console.py | 10 -
mir/config/constants.py | 174 -
mir/config/conversion.py | 115 -
mir/generate/.notes.txt | 66 +
mir/{config => generate}/__init__.py | 0
mir/generate/__main__.py | 276 +
mir/generate/_extras.py | 191 +
mir/{ => generate}/automata.py | 1372 +-
mir/generate/diffusers/__init__.py | 31 +
mir/generate/diffusers/attention.py | 26 +
.../diffusers/doc_parse.py} | 31 +-
mir/generate/diffusers/guiders.py | 61 +
mir/generate/diffusers/index.py | 233 +
mir/generate/diffusers/schedulers.py | 74 +
mir/generate/from_module.py | 125 +
mir/generate/indexers.py | 19 +
mir/{inspect => generate/mlx}/__init__.py | 0
mir/generate/mlx/index.py | 103 +
mir/{inspect => generate}/tasks.py | 194 +-
mir/generate/torch/__init__.py | 0
mir/generate/torch/dtypes.py | 60 +
mir/generate/transformers/__init__.py | 33 +
mir/generate/transformers/index.py | 216 +
mir/generate/write_to_mir.py | 31 +
mir/indexers.py | 319 -
mir/inspect/classes.py | 91 -
mir/inspect/metadata.py | 98 -
mir/inspect/parenting.py | 32 -
mir/inspect/pipes.py | 46 -
mir/{config => }/json_io.py | 6 +-
mir/mir.json | 14941 ----------------
mir/spec/__init__.py | 30 +-
mir/spec/{versions.json => regex.json} | 5 +-
mir/tag.py | 119 +-
pyproject.toml | 2 +-
tests/test_deconstructors_root.py | 6 +-
tests/test_seek_class.py | 3 +-
tests/test_taskanalyzer.py | 10 +-
50 files changed, 3028 insertions(+), 17654 deletions(-)
create mode 100644 data/__init__.py
create mode 100644 data/diffusers_adds.json
create mode 100644 data/exclusions.json
create mode 100644 data/migrations.json
create mode 100644 data/mir.json
rename mir/spec/template.json => data/nn_filter.json (100%)
create mode 100644 data/parameters.json
create mode 100644 data/prefixes.json
rename mir/spec/modes.json => data/tag_scrape.json (99%)
create mode 100644 data/transformers_adds.json
delete mode 100644 mir/__main__.py
delete mode 100644 mir/config/console.py
delete mode 100644 mir/config/constants.py
delete mode 100644 mir/config/conversion.py
create mode 100644 mir/generate/.notes.txt
rename mir/{config => generate}/__init__.py (100%)
create mode 100644 mir/generate/__main__.py
create mode 100644 mir/generate/_extras.py
rename mir/{ => generate}/automata.py (54%)
create mode 100644 mir/generate/diffusers/__init__.py
create mode 100644 mir/generate/diffusers/attention.py
rename mir/{doc_parser.py => generate/diffusers/doc_parse.py} (83%)
create mode 100644 mir/generate/diffusers/guiders.py
create mode 100644 mir/generate/diffusers/index.py
create mode 100644 mir/generate/diffusers/schedulers.py
create mode 100644 mir/generate/from_module.py
create mode 100644 mir/generate/indexers.py
rename mir/{inspect => generate/mlx}/__init__.py (100%)
create mode 100644 mir/generate/mlx/index.py
rename mir/{inspect => generate}/tasks.py (55%)
create mode 100644 mir/generate/torch/__init__.py
create mode 100644 mir/generate/torch/dtypes.py
create mode 100644 mir/generate/transformers/__init__.py
create mode 100644 mir/generate/transformers/index.py
create mode 100644 mir/generate/write_to_mir.py
delete mode 100644 mir/indexers.py
delete mode 100644 mir/inspect/classes.py
delete mode 100644 mir/inspect/metadata.py
delete mode 100644 mir/inspect/parenting.py
delete mode 100644 mir/inspect/pipes.py
rename mir/{config => }/json_io.py (87%)
delete mode 100644 mir/mir.json
rename mir/spec/{versions.json => regex.json} (68%)
diff --git a/data/__init__.py b/data/__init__.py
new file mode 100644
index 0000000..c766341
--- /dev/null
+++ b/data/__init__.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import os
+
+from mir import ROOT_PATH
+from mir.json_io import read_json_file
+
+MIR_PATH_NAMED = os.path.join(ROOT_PATH, "mir.json")
+
+
+DIFFUSERS_ADDS = read_json_file(os.path.join(ROOT_PATH, "data", "diffusers_adds.json"))
+EXCLUSIONS = read_json_file(os.path.join(ROOT_PATH, "data", "exclusions.json"))
+MIGRATIONS = read_json_file(os.path.join(ROOT_PATH, "data", "migrations.json"))
+NN_FILTER = read_json_file(os.path.join(ROOT_PATH, "data", "nn_filter.json"))
+PARAMETERS = read_json_file(os.path.join(ROOT_PATH, "data", "parameters.json"))
+PREFIXES = read_json_file(os.path.join(ROOT_PATH, "data", "prefixes.json"))
+TAG_SCRAPE = read_json_file(os.path.join(ROOT_PATH, "data", "tag_scrape.json"))
+TRANSFORMERS_ADDS = read_json_file(os.path.join(ROOT_PATH, "data", "transformers_adds.json"))
diff --git a/data/diffusers_adds.json b/data/diffusers_adds.json
new file mode 100644
index 0000000..6f39afd
--- /dev/null
+++ b/data/diffusers_adds.json
@@ -0,0 +1,890 @@
+{
+ "stabilityai/stable-diffusion-xl-base-1.0": {
+ "StableDiffusionXLPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "denoising_end": 0.8,
+ "num_inference_steps": 40,
+ "output_type": "latent",
+ "safety_checker": false,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
+ }
+ },
+ "file_256": [
+ "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
+ "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
+ "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
+ "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
+ ],
+ "layer_256": [
+ "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
+ "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
+ "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
+ ],
+ "layer_b3": [
+ "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
+ "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
+ "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
+ ],
+ "identifiers": [
+ "logit_scale",
+ "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
+ "add_embedding.linear_2.bias"
+ ]
+ }
+ },
+ "stabilityai/stable-diffusion-xl-refiner-1.0": {
+ "StableDiffusionXLImg2ImgPipeline": {
+ "pkg": {
+ "1": {
+ "diffusers": "DiffusionPipeline",
+ "generation": {
+ "num_inference_steps": 40,
+ "denoising_end": 0.8
+ }
+ }
+ },
+ "identifiers": [
+ "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
+ ],
+ "file_256": [
+ "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
+ "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
+ "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
+ ],
+ "layer_b3": [
+ "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
+ "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
+ "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
+ ],
+ "layer_256": [
+ "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
+ "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
+ "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
+ ]
+ }
+ },
+ "lodestones/Chroma": {
+ "ChromaPipeline": {
+ "pkg": {
+ "1": {
+ "generation": {
+ "neg_text": "",
+ "num_steps": "28",
+ "latent_size": [
+ 64,
+ 64
+ ]
+ }
+ }
+ },
+ "file_256": [
+ "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
+ "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
+ "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
+ "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
+ ],
+ "layer_b3": [
+ "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
+ ],
+ "layer_256": [
+ "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
+ ]
+ }
+ },
+ "fal/AuraFlow": {
+ "AuraFlowPipeline": {
+ "identifiers": [
+ [
+ 8192,
+ 3072
+ ],
+ "mlpX.c_fc2.weight",
+ "joint_transformer_blocks.2.ff_context.linear_2.weight"
+ ],
+ "file_256": [
+ "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
+ "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
+ "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
+ "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
+ ],
+ "layer_b3": [
+ "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
+ "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
+ "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
+ "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
+ ],
+ "layer_256": [
+ "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
+ "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
+ "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
+ "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
+ ]
+ }
+ },
+ "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers": {
+ "HunyuanDiTPipeline": {
+ "identifiers": [
+ "extra_embedder",
+ "model.blocks",
+ "skip_norm.weight"
+ ],
+ "file_256": [
+ "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
+ "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
+ "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
+ ],
+ "layer_b3": [
+ "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
+ "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
+ "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
+ ],
+ "layer_256": [
+ "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
+ "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
+ "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
+ ]
+ }
+ },
+ "Alpha-VLLM/Lumina-Next-SFT-diffusers": {
+ "LuminaPipeline": {
+ "pkg": {
+ "0": {
+ "precision": " ops.precision.bfloat.B16"
+ }
+ },
+ "identifiers": [
+ "time_caption",
+ "feed_forward"
+ ],
+ "file_256": [
+ "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
+ ],
+ "layer_b3": [
+ "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
+ "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
+ ],
+ "layer_256": [
+ "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
+ "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
+ ]
+ }
+ },
+ "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS": {
+ "PixArtSigmaPipeline": {
+ "identifiers": [
+ "adaln_single",
+ "scale_shift_table"
+ ],
+ "file_256": [
+ "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
+ "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
+ ],
+ "layer_b3": [
+ "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
+ "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
+ ],
+ "layer_256": [
+ "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
+ "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
+ ]
+ }
+ },
+ "PixArt-alpha/PixArt-XL-2-1024-MS": {
+ "PixArtAlphaPipeline": {
+ "identifiers": [
+ "aspect_ratio",
+ "y_embedding",
+ "emb.resolution",
+ "caption_projection"
+ ],
+ "file_256": [
+ "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
+ ],
+ "layer_b3": [
+ "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
+ ],
+ "layer_256": [
+ "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
+ ]
+ }
+ },
+ "stabilityai/stable-diffusion-3.5-medium": {
+ "StableDiffusion3Pipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16"
+ }
+ },
+ "identifiers": [
+ "model.diffusion_model.joint_blocks.",
+ "transformer_blocks.21.norm1_context.linear.weight",
+ "transformer_blocks.31.norm1_context.linear.weight",
+ "blocks.11.ff.net.2.weight"
+ ],
+ "file_256": [
+ "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
+ "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
+ ],
+ "layer_b3": [
+ "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
+ "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
+ ],
+ "layer_256": [
+ "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
+ "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
+ ]
+ }
+ },
+ "Efficient-Large-Model/Sana-1600M-1024px-BF16-diffusers": {
+ "SanaPipeline": {
+ "pkg": {
+ "0": {
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 4.5,
+ "num_inference_steps": 20
+ },
+ "precision": "ops.precision.bfloat.B16"
+ }
+ },
+ "file_256": [
+ "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
+ ],
+ "layer_b3": [
+ "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
+ ],
+ "layer_256": [
+ "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
+ ]
+ }
+ },
+ "stable-diffusion-v1-5/stable-diffusion-v1-5": {
+ "StableDiffusionPipeline": {
+ "identifiers": [
+ "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
+ ],
+ "file_256": [
+ "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
+ "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
+ "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
+ "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
+ "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
+ "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
+ "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
+ ],
+ "layer_b3": [
+ "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
+ "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
+ "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
+ "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
+ ],
+ "layer_256": [
+ "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
+ "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
+ "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
+ ]
+ }
+ },
+ "stabilityai/stable-cascade-prior": {
+ "StableCascadePriorPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "negative_prompt": "",
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 20,
+ "guidance_scale": 4.0,
+ "width": 1024,
+ "height": 1024
+ }
+ }
+ },
+ "file_256": [
+ "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
+ "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
+ "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
+ "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
+ "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
+ "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
+ ],
+ "layer_b3": [
+ "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
+ "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
+ "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
+ "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
+ "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
+ "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
+ ],
+ "layer_256": [
+ "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
+ "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
+ "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
+ "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
+ "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
+ "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
+ ],
+ "identifiers": [
+ "down_blocks.0.2.kv_mapper",
+ "previewer",
+ "backbone"
+ ]
+ }
+ },
+ "black-forest-labs/FLUX.1-dev": {
+ "FluxPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 3.5,
+ "num_inference_steps": 50,
+ "max_sequence_length": 512
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "gudance": 3.5,
+ "num_inference_steps": 25
+ }
+ }
+ },
+ "file_256": [
+ "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
+ "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
+ "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
+ "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
+ "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
+ ],
+ "layer_b3": [
+ "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
+ "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
+ "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
+ ],
+ "layer_256": [
+ "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
+ "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
+ "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
+ ],
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ]
+ }
+ },
+ "black-forest-labs/FLUX.1-schnell": {
+ "FluxPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "guidance_scale": 0.0,
+ "num_inference_steps": 4,
+ "max_sequence_length": 256
+ }
+ },
+ "1": {
+ "mflux": "flux.flux.Flux1",
+ "generation": {
+ "height": 1024,
+ "width": 1024,
+ "num_inference_steps": 4
+ }
+ }
+ },
+ "identifiers": [
+ "double_blocks.12.txt_mod.lin.weight",
+ "add_q_proj.weight",
+ "single_transformer_blocks.9.norm.linear.weight"
+ ],
+ "file_256": [
+ "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
+ "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
+ ],
+ "layer_b3": [
+ "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
+ "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
+ "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
+ ],
+ "layer_256": [
+ "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
+ "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
+ "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
+ ]
+ }
+ },
+ "stabilityai/stable-cascade": {
+ "StableCascadeDecoderPipeline": {
+ "pkg": {
+ "0": {
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 0.0,
+ "output_type": "pil",
+ "num_inference_steps": 10
+ },
+ "precision": "ops.precision.bfloat.B16"
+ }
+ },
+ "file_256": [
+ "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
+ "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
+ "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
+ "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
+ "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
+ ],
+ "layer_b3": [
+ "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
+ "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
+ "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
+ "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
+ "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
+ ],
+ "layer_256": [
+ "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
+ "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
+ "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
+ "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
+ "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
+ ],
+ "identifiers": [
+ "0.2.channelwise",
+ "clip_mapper.bias",
+ ".12.self_attn.k_proj.weight"
+ ]
+ }
+ },
+ "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers": {
+ "WanImageToVideoPipeline": {
+ "file_256": [
+ "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
+ "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
+ "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
+ "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
+ "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
+ "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
+ "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
+ "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
+ "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
+ ],
+ "layer_b3": [
+ "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
+ "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
+ "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
+ "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
+ "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
+ ],
+ "layer_256": [
+ "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
+ "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
+ "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
+ "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
+ "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
+ ]
+ }
+ },
+ "Qwen/Qwen-Image": {
+ "QwenImagePipeline": {
+ "file_256": [
+ "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
+ ],
+ "layer_b3": [
+ "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
+ ],
+ "layer_256": [
+ "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
+ ]
+ }
+ },
+ "Wan-AI/Wan2.1-VACE-1.3B-diffusers": {
+ "WanVACEPipeline": {
+ "file_256": [
+ "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
+ "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
+ "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
+ "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
+ "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
+ ],
+ "layer_b3": [
+ "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
+ "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
+ "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
+ "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
+ ],
+ "layer_256": [
+ "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
+ "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
+ "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
+ "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
+ ]
+ }
+ },
+ "Wan-AI/Wan2.1-T2V-14B-Diffusers": {
+ "WanPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
+ }
+ },
+ "file_256": [
+ "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
+ "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
+ "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
+ "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
+ "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
+ "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
+ "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
+ "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
+ ],
+ "layer_b3": [
+ "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
+ "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
+ "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
+ "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
+ "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
+ "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
+ "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
+ ],
+ "layer_256": [
+ "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
+ "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
+ "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
+ "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
+ "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
+ "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
+ "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
+ ]
+ }
+ },
+ "Wan-AI/Wan2.1-T2V-1.3B-Diffusers": {
+ "WanVideoToVideoPipeline": {
+ "pkg": {
+ "0": {
+ "diffusers": "WanPipeline",
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "height": 480,
+ "width": 832,
+ "num_frames": 81,
+ "guidance_scale": 5.0
+ }
+ }
+ }
+ }
+ },
+ "nvidia/cosmos-predict2-text2image": {
+ "Cosmos2TextToImagePipeline": {
+ "file_256": [
+ "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
+ "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
+ "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
+ "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
+ "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
+ "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
+ "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
+ "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
+ "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
+ "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
+ "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
+ "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
+ "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
+ ],
+ "layer_b3": [
+ "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
+ "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
+ "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
+ "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
+ "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
+ "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
+ "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
+ "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
+ "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
+ "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
+ "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
+ "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
+ "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
+ ],
+ "layer_256": [
+ "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
+ "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
+ "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
+ "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
+ "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
+ "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
+ "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
+ "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
+ "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
+ "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
+ "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
+ "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
+ "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
+ ]
+ }
+ },
+ "rhymes-ai/Allegro": {
+ "AllegroPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.bfloat.B16",
+ "generation": {
+ "guidance_scale": 7.5,
+ "max_sequence_length": 512,
+ "num_inference_steps": 100
+ }
+ }
+ },
+ "file_256": [
+ "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
+ ],
+ "layer_b3": [
+ "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
+ ],
+ "layer_256": [
+ "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
+ ]
+ }
+ },
+ "audioldm-s-v2": {
+ "AudioLDMPipeline": {
+ "file_256": [
+ "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ]
+ }
+ },
+ "zai-org/CogVideoX-2b": {
+ "CogVideoXPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_videos_per_prompt": 1,
+ "num_inference_steps": 50,
+ "num_frames": 49,
+ "guidance_scale": 6
+ }
+ }
+ },
+ "file_256": [
+ "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
+ ],
+ "layer_b3": [
+ "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
+ ],
+ "layer_256": [
+ "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
+ ]
+ }
+ },
+ "HiDream-ai/HiDream-I1-Full": {
+ "StableDiffusion3Pipeline": {
+ "file_256": [
+ "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
+ ],
+ "layer_b3": [
+ "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
+ ],
+ "layer_256": [
+ "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
+ ]
+ }
+ },
+ "cvssp/audioldm2": {
+ "AudioLDM2Pipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
+ }
+ },
+ "file_256": [
+ "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
+ ],
+ "layer_b3": [
+ "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
+ ],
+ "layer_256": [
+ "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
+ ]
+ }
+ },
+ "Alpha-VLLM/Lumina-Image-2.0": {
+ "Lumina2Pipeline": {
+ "pkg": {},
+ "file_256": [
+ "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
+ "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
+ ],
+ "layer_b3": [
+ "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
+ ],
+ "layer_256": [
+ "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
+ "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
+ ]
+ }
+ },
+ "ucsd-reach/musicldm": {
+ "MusicLDMPipeline": {
+ "pkg": {
+ "0": {
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_length_in_s": 10.0
+ }
+ }
+ },
+ "file_256": [
+ "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
+ ],
+ "layer_b3": [
+ "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
+ ],
+ "layer_256": [
+ "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
+ ]
+ }
+ },
+ "openai/shap-e": {
+ "ShapEPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 64,
+ "size": 256,
+ "guidance_scale": 15
+ }
+ }
+ }
+ }
+ },
+ "hunyuanvideo-community/HunyuanVideo": {
+ "HunyuanVideoPipeline": {
+ "file_256": [
+ "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
+ ],
+ "layer_b3": [
+ "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
+ ],
+ "layer_256": [
+ "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
+ ]
+ }
+ },
+ "zai-org/CogView3-Plus-3B": {
+ "CogView3PlusPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "guidance_scale": 7.0,
+ "num_images_per_prompt": 1,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
+ }
+ }
+ }
+ },
+ "stabilityai/stable-audio-open-1.0": {
+ "StableAudioPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "num_inference_steps": 200,
+ "audio_end_in_s": 10,
+ "num_waveforms_per_prompt": 3
+ }
+ }
+ }
+ }
+ },
+ "Kwai-Kolors/Kolors-diffusers": {
+ "KolorsPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16",
+ "generation": {
+ "negative_prompt": "",
+ "guidance_scale": 5.0,
+ "num_inference_steps": 50,
+ "width": 1024,
+ "height": 1024
+ }
+ },
+ "1": {
+ "diffusers": "DiffusionPipeline"
+ }
+ },
+ "file_256": [
+ "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
+ ],
+ "layer_b3": [
+ "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
+ ],
+ "layer_256": [
+ "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
+ ],
+ "identifiers": [
+ ".DenseReluDense.wi.weight",
+ "encoder_hid_proj.weight"
+ ]
+ }
+ },
+ "tencent-hunyuan/hunyuandiT-v1.2-diffusers": {
+ "HunyuanDiTPipeline": {
+ "pkg": {
+ "0": {
+ "precision": "ops.precision.float.F16"
+ }
+ },
+ "file_256": [
+ "7d31ac8fa389ff39dd0a81430010e52c43b59f15adc00c83625a47881e16830e"
+ ],
+ "layer_b3": [
+ "bccd37ecc9f85d132b46d0bb67b4facb49fc6c091428a4feba9ab9a93140f5fe"
+ ],
+ "layer_256": [
+ "ed25d241d58ca298d28abd5919e70341ad194e77dce4859436b52ea4d8fcb616"
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/data/exclusions.json b/data/exclusions.json
new file mode 100644
index 0000000..e386bb2
--- /dev/null
+++ b/data/exclusions.json
@@ -0,0 +1,32 @@
+{
+ "exclusion_list": [
+ "auto_pipeline",
+ "consistency_models",
+ "pipeline_utils",
+ "deprecated",
+ "ddim",
+ "ddpm",
+ "deprecated",
+ "autopipeline",
+ "dance_diffusion",
+ "diffusionpipeline",
+ "dit",
+ "latent_consistency_models",
+ "latent_diffusion",
+ "ledits_pp",
+ "pag",
+ "paint_by_example",
+ "semantic_stable_diffusion",
+ "stable_diffusion_attend_and_excite",
+ "stable_diffusion_diffedit",
+ "stable_diffusion_k_diffusion",
+ "stable_diffusion_panorama",
+ "stable_diffusion_safe",
+ "stable_diffusion_sag",
+ "t2i_adapter",
+ "text_to_video_synthesis",
+ "unclip",
+ "unidiffuser",
+ "controlnet_hunyuandit"
+ ]
+}
\ No newline at end of file
diff --git a/data/migrations.json b/data/migrations.json
new file mode 100644
index 0000000..8f696e7
--- /dev/null
+++ b/data/migrations.json
@@ -0,0 +1,58 @@
+{
+ "repo": {
+ "/helium-2b": "/helium-1-2b",
+ "allenai/Olmo2-7B-1124-hf": "allenai/Olmo-2-1124-7B",
+ "apple/mobilevitv2-1.0": "apple/mobilevitv2-1.0-imagenet1k-256",
+ "caidas/swin2SR-classical-sr-x2-64": "caidas/swin2SR-classical-sr-x2-64",
+ "facebook/hiera-base-224": "facebook/hiera-base-224-hf",
+ "facebook/sam_hq-vit-huge": "syscv-community/sam-hq-vit-huge",
+ "facebook/vit_msn_base": "facebook/vit-msn-base",
+ "facebook/wav2vec2-bert-rel-pos-large": "facebook/w2v-bert-2.0",
+ "google/gemma-3-4b": "google/gemma-3-4b-it",
+ "google/gemma2-7b": "google/gemma-2-9b",
+ "google/gemma3_text-7b": "google/gemma-3-12b-it",
+ "IDEA-Research/dab_detr-base": "IDEA-Research/dab-detr-resnet-50",
+ "LGAI-EXAONE/EXAONE-4.0-Instruct": "LGAI-EXAONE/EXAONE-4.0-32B",
+ "meta/chameleon-7b'": "facebook/chameleon-7b",
+ "mixtralai/Mixtral-8x7B": "mistralai/Mixtral-8x7B-v0.1",
+ "paligemma-hf/paligemma-2b": "google/paligemma2-3b-mix-224",
+ "pixtral-hf/pixtral-9b": "mistralai/Pixtral-12B-Base-2409",
+ "Qwen/Qwen2-7B-beta": "Qwen/Qwen2-7B",
+ "Qwen/Qwen3-15B-A2B": "Qwen/Qwen3-30B-A3B",
+ "s-JoL/Open-Llama-V1": "openlm-research/open_llama_3b",
+ "Salesforce/instruct-blip-flan-t5": "Salesforce/instructblip-flan-t5-xl",
+ "state-spaces/mamba2-2.8b": "AntonV/mamba2-2.7b-hf",
+ "ibm-fms/FalconH1-9.8b-2.2T-hf": "tiiuae/Falcon-H1-34B-Instruct",
+ "nvidia/nemotron-3-8b-base-4k-hf": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
+ "THUDM/": "zai-org/",
+ "THUDM/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
+ "zai-org/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air"
+ },
+ "model": {
+ "bark": "suno/bark",
+ "aria_text": "rhymes-ai/Aria-Chat",
+ "cwm": "facebook/cwm",
+ "decision_transformer": "edbeeching/decision-transformer-gym-hopper-medium",
+ "distilbert": "distilbert-base-uncased",
+ "gpt_bigcode": "bigcode/gpt_bigcode-santacoder",
+ "granite": "ibm-granite/granite-3.3-2b-base",
+ "granitemoe": "ibm-research/PowerMoE-3b",
+ "granitemoehybrid": "ibm-granite/granite-4.0-h-small",
+ "musicgen": "facebook/musicgen-small",
+ "seamless_m4t_v2": "facebook/seamless-m4t-v2-large",
+ "timm_backbone": "microsoft/resnet-50",
+ "gpt_oss": "openai/gpt-oss-120b",
+ "bert": "google-bert/bert-base-uncased",
+ "timm_wrapper": "timm/resnet18.a1_in1k",
+ "vision-text-dual-encoder": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
+ },
+ "module": {
+ "blip_diffusion": "blip_diffusion",
+ "cogvideo": "cogvideox",
+ "cogview3": "cogview3plus",
+ "deepfloyd_if": "if",
+ "cosmos": "cosmos2_text2image",
+ "visualcloze": "visualcloze_generation",
+ "marigold": "marigold_depth"
+ }
+}
\ No newline at end of file
diff --git a/data/mir.json b/data/mir.json
new file mode 100644
index 0000000..59ae13b
--- /dev/null
+++ b/data/mir.json
@@ -0,0 +1,3 @@
+{
+ "expected": "data"
+}
\ No newline at end of file
diff --git a/mir/spec/template.json b/data/nn_filter.json
similarity index 100%
rename from mir/spec/template.json
rename to data/nn_filter.json
diff --git a/data/parameters.json b/data/parameters.json
new file mode 100644
index 0000000..5a3f650
--- /dev/null
+++ b/data/parameters.json
@@ -0,0 +1,30 @@
+{
+ "bark": {
+ "n_head": [
+ ""
+ ]
+ },
+ "aria_text": {
+ "vision_config": [
+ ""
+ ],
+ "text_config": [
+ ""
+ ]
+ },
+ "cwm": {
+ "n_head": [
+ ""
+ ]
+ },
+ "bert": {
+ "act_dropout": [
+ ""
+ ]
+ },
+ "timm_wrapper": {
+ "_resnet_": [
+ ""
+ ]
+ }
+}
\ No newline at end of file
diff --git a/data/prefixes.json b/data/prefixes.json
new file mode 100644
index 0000000..6f535f8
--- /dev/null
+++ b/data/prefixes.json
@@ -0,0 +1,34 @@
+{
+ "pipe_variables": [
+ ">>> motion_adapter = ",
+ ">>> adapter = ",
+ ">>> controlnet = ",
+ ">>> super_res_1_pipe = ",
+ ">>> pipe_prior = ",
+ ">>> pipe_prior_redux = ",
+ ">>> pipe = ",
+ ">>> pipeline = ",
+ ">>> blip_diffusion_pipe = ",
+ ">>> prior_pipe = ",
+ ">>> gen_pipe = ",
+ "pipe = "
+ ],
+ "repo_variables": [
+ "controlnet_model",
+ "controlnet_id",
+ "base_model",
+ "model_id_or_path",
+ "model_ckpt",
+ "model_id",
+ "repo_base",
+ "repo",
+ "motion_adapter_id"
+ ],
+ "call_methods": [
+ ".from_pretrained(",
+ ".from_single_file("
+ ],
+ "staged_call_methods": [
+ ".from_pretrain("
+ ]
+}
\ No newline at end of file
diff --git a/mir/spec/modes.json b/data/tag_scrape.json
similarity index 99%
rename from mir/spec/modes.json
rename to data/tag_scrape.json
index 24a95e3..9448114 100644
--- a/mir/spec/modes.json
+++ b/data/tag_scrape.json
@@ -4692,31 +4692,6 @@
"region:us"
]
},
- "info.art.bert-uncased.*": {
- "pipeline": "fill-mask",
- "library": "transformers",
- "tags": [
- "transformers",
- "pytorch",
- "tf",
- "jax",
- "rust",
- "coreml",
- "onnx",
- "safetensors",
- "bert",
- "fill-mask",
- "exbert",
- "en",
- "dataset:bookcorpus",
- "dataset:wikipedia",
- "arxiv:1810.04805",
- "license:apache-2.0",
- "autotrain_compatible",
- "endpoints_compatible",
- "region:us"
- ]
- },
"info.aet.mra-512-4.*": {
"pipeline": "fill-mask",
"library": "transformers",
diff --git a/data/transformers_adds.json b/data/transformers_adds.json
new file mode 100644
index 0000000..c26af2d
--- /dev/null
+++ b/data/transformers_adds.json
@@ -0,0 +1,332 @@
+{
+ "google-t5/t5-small": {
+ "T5Model": {
+ "identifiers": [
+ [
+ 4096
+ ],
+ "encoder.embed_tokens.weight",
+ "text_encoders.t5xxl.transformer.shared.weight",
+ "t5xxl",
+ "encoder.block.0.layer.1.DenseReluDense.wi.weight"
+ ],
+ "file_256": [
+ "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
+ "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
+ "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
+ "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
+ "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
+ "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
+ "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
+ "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
+ "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
+ "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
+ "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
+ "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
+ "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
+ "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
+ "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
+ "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
+ "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
+ "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
+ "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
+ "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
+ "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
+ "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
+ "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
+ "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
+ "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
+ "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
+ "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
+ ],
+ "layer_b3": [
+ "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
+ "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
+ "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
+ "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
+ "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
+ "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
+ "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
+ "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
+ "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
+ "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
+ "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
+ "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
+ "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
+ "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
+ "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
+ "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
+ "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
+ "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
+ "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
+ "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
+ "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
+ "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
+ "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
+ "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
+ "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
+ "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
+ "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
+ "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
+ "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
+ "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
+ ],
+ "layer_256": [
+ "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
+ "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
+ "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
+ "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
+ "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
+ "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
+ "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
+ "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
+ "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
+ "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
+ "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
+ "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
+ "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
+ "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
+ "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
+ "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
+ "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
+ "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
+ "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
+ "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
+ "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
+ "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
+ "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
+ "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
+ "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
+ "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
+ "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
+ "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
+ ]
+ }
+ },
+ "google/umt5-small": {
+ "UMT5Model": {
+ "identifiers": [
+ "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
+ ],
+ "file_256": [
+ "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
+ "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
+ "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
+ "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
+ "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
+ "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
+ "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
+ "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
+ "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
+ "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
+ "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
+ "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
+ "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
+ "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
+ "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
+ "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
+ ],
+ "layer_b3": [
+ "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
+ "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
+ "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
+ "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
+ "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
+ "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
+ "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
+ "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
+ "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
+ "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
+ "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
+ "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
+ "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
+ "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
+ "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
+ "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
+ "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
+ "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
+ ],
+ "layer_256": [
+ "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
+ "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
+ "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
+ "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
+ "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
+ "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
+ "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
+ "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
+ "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
+ "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
+ "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
+ "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
+ "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
+ "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
+ "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
+ "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
+ "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
+ "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
+ ]
+ }
+ },
+ "google/gemma2-9b": {
+ "Gemma2Model": {
+ "file_256": [
+ "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
+ "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
+ ],
+ "layer_b3": [
+ "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
+ "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
+ ],
+ "layer_256": [
+ "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
+ "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
+ ]
+ }
+ },
+ "google/gemma-7b": {
+ "GemmaModel": {
+ "file_256": [
+ "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
+ ],
+ "layer_b3": [
+ "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
+ "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
+ ],
+ "layer_256": [
+ "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
+ "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
+ ]
+ }
+ },
+ "google/mt5-small": {
+ "MT5Model": {
+ "identifiers": [
+ [
+ 250112,
+ 2048
+ ],
+ "text_encoders.mt5xl.transformer.shared.weight"
+ ],
+ "file_256": [
+ "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
+ "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
+ ],
+ "layer_b3": [
+ "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51",
+ "bc71d4259f4feaa0fb27c1f288765004840f39247cddc98b3ac37329ff1354d0"
+ ],
+ "layer_256": [
+ "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
+ "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
+ ]
+ }
+ },
+ "Qwen/Qwen3-15B-A2B": {
+ "Qwen3MoeModel": {
+ "file_256": [
+ "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
+ ],
+ "layer_b3": [
+ "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
+ ],
+ "layer_256": [
+ "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
+ ]
+ }
+ },
+ "Qwen/Qwen2-VL-7B-Instruct": {
+ "Qwen2VLTextModel": {
+ "file_256": [
+ "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
+ "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
+ "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
+ "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
+ ],
+ "layer_b3": [
+ "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
+ "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
+ "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
+ "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
+ ],
+ "layer_256": [
+ "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
+ "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
+ "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
+ "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
+ ]
+ }
+ },
+ "openai/gpt-oss-120b": {
+ "GptOssModel": {
+ "file_256": [
+ "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
+ "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
+ ],
+ "layer_b3": [
+ "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
+ "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
+ "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
+ ],
+ "layer_256": [
+ "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
+ "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
+ ]
+ }
+ },
+ "microsoft/Phi-4-multimodal-instruct": {
+ "Phi4MultimodalModel": {
+ "file_256": [
+ "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
+ ],
+ "layer_b3": [
+ "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
+ ],
+ "layer_256": [
+ "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
+ ]
+ }
+ },
+ "laion/clap-htsat-fused": {
+ "ClapModel": {
+ "file_256": [
+ "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
+ "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
+ ],
+ "layer_b3": [
+ "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
+ "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
+ ],
+ "layer_256": [
+ "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
+ "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
+ ]
+ }
+ },
+ "google-bert/bert-base-uncased": {
+ "BertModel": {
+ "file_256": [
+ "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
+ ],
+ "layer_b3": [
+ "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
+ ],
+ "layer_256": [
+ "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
+ ]
+ }
+ },
+ "llava-hf/llava-9b": {
+ "LlavaModel": {
+ "file_256": [
+ "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
+ ],
+ "layer_b3": [
+ "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
+ ],
+ "layer_256": [
+ "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
+ ]
+ }
+ }
+}
\ No newline at end of file
diff --git a/mir/__init__.py b/mir/__init__.py
index 2942506..c2ad045 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -1,28 +1,18 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-
-
-def main():
- import mir.maid
- from mir.maid import main as mir_main
-
- mir_main()
- from mir.inspect.tasks import main
-
- main()
- from mir.inspect.tasks import pipe
-
- pipe()
-
- import os
- import shutil
-
- try:
- os.remove("mir.json")
- except FileNotFoundError:
- pass
- shutil.copy2(os.path.join(os.path.dirname(mir.maid.__file__), "mir.json"), os.path.join(os.getcwd(), "mir.json"))
-
-
-if __name__ == "__main__":
- main()
+import os
+
+from mir.json_io import read_json_file
+from logging import DEBUG, INFO, Logger
+
+NFO = Logger(INFO).info
+DBUQ = Logger(DEBUG).debug
+
+ROOT_PATH = os.path.dirname(__file__)
+MIR_PATH_NAMED = os.path.join(ROOT_PATH, "mir.json")
+BREAKING = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["breaking"]
+SEARCH = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["search"]
+PARAMETERS = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["parameters"]
+SEMANTIC = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["semantic"]
+SUFFIX = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["suffix"]
+IGNORE = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["ignore"]
diff --git a/mir/__main__.py b/mir/__main__.py
deleted file mode 100644
index ab1a1aa..0000000
--- a/mir/__main__.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# # #
-# # #
-
-
-from mir.maid import MIRDatabase
-from mir.inspect.tasks import TaskAnalyzer
-
-
-def main(mir_db: MIRDatabase = None):
- """Parse arguments to feed to dict header reader"""
- import argparse
- import asyncio
- from mir.automata import assimilate
- from sys import modules as sys_modules
-
- if "pytest" not in sys_modules:
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Scrape the task classes from currently installed libraries and attach them to an existing MIR database.\nOffline function.",
- usage="mir-tasks",
- epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
- )
- parser.parse_args()
-
- if not mir_db:
- mir_db = MIRDatabase()
-
- tasker = TaskAnalyzer()
- task_tuple = asyncio.run(tasker.detect_tasks(mir_db))
-
- assimilate(mir_db, [task for task in task_tuple])
-
- mir_db.write_to_disk()
- return mir_db
-
-
-def run_task():
- main()
-
-
-def pipe(mir_db: MIRDatabase = None):
- import argparse
- import asyncio
- from sys import modules as sys_modules
-
- if "pytest" not in sys_modules:
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Infer pipe components from Diffusers library and attach them to an existing MIR database.\nOffline function.",
- usage="mir-pipe",
- epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
- )
- parser.parse_args()
-
- from mir.automata import assimilate
-
- if not mir_db:
- mir_db = MIRDatabase()
-
- tasker = TaskAnalyzer()
- pipe_tuple = asyncio.run(tasker.detect_pipes(mir_db))
- assimilate(mir_db, [pipe for pipe in pipe_tuple])
- mir_db.write_to_disk()
- return mir_db
-
-
-# if __name__ == "__main__":
-# pipe()
diff --git a/mir/config/console.py b/mir/config/console.py
deleted file mode 100644
index a5ad63a..0000000
--- a/mir/config/console.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from logging import DEBUG, INFO, Logger
-
-nfo_obj = Logger(INFO)
-dbuq_obj = Logger(DEBUG)
-
-nfo = nfo_obj.info
-dbuq = dbuq_obj.debug
diff --git a/mir/config/constants.py b/mir/config/constants.py
deleted file mode 100644
index a572017..0000000
--- a/mir/config/constants.py
+++ /dev/null
@@ -1,174 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-import os
-from dataclasses import dataclass, field
-from typing import Callable, List
-
-import transformers
-from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
-from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES
-
-from mir.config.json_io import read_json_file
-from mir.config.console import nfo
-
-
-def mapped_cls(model_identifier: str):
- """Get model class from identifier without calling huggingface_hub.\n
- :param model_identifier: Model identifier like "bert-base-uncased" or "gpt2"
- :return: Model class (e.g., BertModel, GPT2Model)
- """
- code_name = model_identifier.split("/")[-1].split("-")[0].lower()
-
- model_class_name = MODEL_MAPPING_NAMES.get(code_name, None)
-
- config_class_name = CONFIG_MAPPING_NAMES.get(code_name)
- if config_class_name:
- config_class = getattr(transformers, config_class_name, None)
- if config_class:
- model_class = MODEL_MAPPING.get(config_class, None)
- if model_class:
- if isinstance(model_class, tuple):
- model_class = model_class[0]
- return model_class
-
- normalized = code_name.replace("_", "-")
- if normalized != code_name:
- if model_class_name := MODEL_MAPPING_NAMES.get(normalized, None):
- if isinstance(model_class_name, tuple):
- model_class_name = model_class_name[0]
- return getattr(transformers, model_class_name, None)
-
- return None
-
-
-def import_submodules(module_name: str, pkg_name_or_abs_path: str) -> Callable | None:
- """Convert two strings into a callable function or property\n
- :param module: The name of the module to import
- :param library_path: Base package for the module
- :return: The callable attribute or property
- """
- from importlib import import_module
-
- module = module_name.strip()
- library = pkg_name_or_abs_path.strip()
- try:
- base_library = import_module(library, module)
- except SyntaxError:
- base_library = None
- nfo(f"Syntax error attempting to import {module_name}")
- if module := getattr(base_library, module, None):
- return module
- else:
- nfo("failed to find module {module}")
-
-
-def extract_init_parameters(module: Callable | str, package_name: str | None = None) -> dict[str, list[str]]:
- """Pick apart a Diffusers or Transformers pipeline class and find its constituent parts (formerly root_class)\n
- :param module: Origin pipeline as a class or as a string
- :param library: name of a library to import the class from, only if a string is provided
- :return: Dictionary of sub-classes from the `module`"""
-
- import inspect
-
- if package_name and isinstance(module, str):
- module_obj: Callable = import_submodules(module, package_name)
- else:
- assert isinstance(module, Callable)
- module_obj = module
- signature = inspect.signature(module_obj.__init__)
- class_names = {}
- editable_signature = signature.parameters.copy()
- editable_signature.pop("self", None)
- editable_signature.pop("kwargs", None)
- editable_signature.pop("use_cache", None)
- for folder, param in editable_signature.items():
- class_names.setdefault(folder, True)
- return class_names
-
-
-@dataclass
-class ClassMapEntry:
- """Represents a structured entry of the name of the class and its associated attributes."""
-
- name: str
- model_name: str
- model: Callable
- config: Callable
- config_params: dict[str, list[str]] = field(init=False, default_factory=lambda: {})
- model_params: dict[str, list[str]] | None = None
-
- def __post_init__(self):
- if self.model:
- self.model_params = extract_init_parameters(self.model)
- if self.config:
- self.config_params = extract_init_parameters(self.config)
-
-
-@dataclass
-class DocStringEntry:
- """Represents a structured entry of package name, file name, and docstring."""
-
- package_name: str
- file_name: str
- doc_string: str
-
-
-class DocParseData:
- pipe_class: str
- pipe_repo: str
- staged_class: str | None = None
- staged_repo: str | None = None
-
- def __init__(self, pipe_class: str, pipe_repo: str, staged_class: str | None = None, staged_repo: str | None = None):
- self.pipe_class = pipe_class
- self.pipe_repo = pipe_repo
- self.staged_class = staged_class
- self.staged_repo = staged_repo
-
-
-class DocStringParserConstants:
- """Constants used by DocStringParser for parsing docstrings."""
-
- pipe_prefixes: List[str] = [
- ">>> motion_adapter = ",
- ">>> adapter = ", # if this moves, also change motion_adapter check
- ">>> controlnet = ",
- ">>> super_res_1_pipe = ",
- ">>> pipe_prior = ",
- ">>> pipe_prior_redux = ",
- ">>> pipe = ",
- ">>> pipeline = ",
- ">>> blip_diffusion_pipe = ",
- ">>> prior_pipe = ",
- ">>> gen_pipe = ",
- "pipe = ",
- ]
- repo_variables: List[str] = [
- "controlnet_model",
- "controlnet_id",
- "base_model",
- "model_id_or_path",
- "model_ckpt",
- "model_id",
- "repo_base",
- "repo",
- "motion_adapter_id",
- ]
- call_types: List[str] = [".from_pretrained(", ".from_single_file("]
- staged_call_types: List[str] = [
- ".from_pretrain(",
- ]
-
-
-package_map = {
- "diffusers": ("_import_structure", "diffusers.pipelines"),
- "transformers": ("MODEL_MAPPING_NAMES", "transformers.models.auto.modeling_auto"),
-}
-root_path = os.path.join(os.getcwd(), "mir")
-versions = read_json_file(os.path.join(root_path, "spec", "versions.json"))
-template = read_json_file(os.path.join(root_path, "spec", "template.json"))
-MIR_PATH_NAMED = os.path.join(root_path, "mir.json")
-
-BREAKING_SUFFIX = r".*(?:-)(prior)$|.*(?:-)(diffusers)$|.*[_-](\d{3,4}px|-T2V$|-I2V$)"
-PARAMETERS_SUFFIX = r"(\d{1,4}[KkMmBb]|[._-]\d+[\._-]\d+[Bb][._-]).*?$"
-SEARCH_SUFFIX = r"\d+[._-]?\d+[BbMmKk](it)?|[._-]\d+[BbMmKk](it)?"
diff --git a/mir/config/conversion.py b/mir/config/conversion.py
deleted file mode 100644
index beaee14..0000000
--- a/mir/config/conversion.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-from typing import Callable, Optional, Union, Type, List, Generator, Dict
-
-from mir.config.console import dbuq, nfo
-from mir.config.constants import DocStringEntry, ClassMapEntry, import_submodules
-
-
-def retrieve_diffusers_docstrings(
- package_name: str,
- file_names: list[str],
-) -> Generator[DocStringEntry]:
- """Yield (pkg, file, EXAMPLE_DOC_STRING) from a folder or a single file.\n
- :param pkg_name: Package under ``diffusers.pipelines``.\n
- :param file_names: A list of related file names.\n
- :param use_folder: True → treat ``source`` as a folder with ``_import_structure``.\n
- :return: DocString Entry class.\n
- """
- import os
- from importlib import import_module
-
- module_location: str | None = import_module("diffusers.pipelines").__file__
- module_path = os.path.dirname(module_location)
-
- for file_name in file_names:
- assert isinstance(file_name, str)
- if file_name == "pipeline_stable_diffusion_xl_inpaint":
- continue
-
- pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
- dbuq(pkg_path)
-
- if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
- pipe_file = import_submodules(file_name, pkg_path) or import_module(pkg_path) or nfo(f"Failed to import {pkg_path}")
- if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
- yield DocStringEntry(package_name=package_name, file_name=file_name, doc_string=doc_string)
- else:
- nfo(f"Doc string attribute missing for {package_name}/{file_name}")
- else:
- nfo(f"Path not found for {package_name}/{file_name}")
-
- return
-
-
-def get_repo_from_class_map(class_map: ClassMapEntry) -> str | None:
- """The name of the repository that is associated with a transformers configuration class
- :param class_map: Transformers class information extracted from dependency
- :returns: A string matching the repo path for the class"""
-
- import re
-
- doc_attempt = []
- if hasattr(class_map.config, "forward"):
- doc_attempt = [getattr(class_map.config, "forward")]
- doc_attempt.append(class_map.config)
- for pattern in doc_attempt:
- doc_string = pattern.__doc__
- matches = re.findall(r"\[([^\]]+)\]", doc_string)
- if matches:
- try:
- repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
- except StopIteration as error_log:
- nfo(f"ERROR >>{matches} : LOG >> {error_log}")
- continue
- return repo_path
- return None
-
-
-def class_to_mir_tag(mir_db: Dict[str, str], code_name: str) -> Optional[str]:
- """Converts a class identifier to its corresponding MIR tag.\n
- :param mir_db: A dictionary mapping series-compatibility pairs to their respective data.
- :param code_name: The Transformers class identifier to convert.
- :return: An optional list containing the series and compatibility if found, otherwise None."""
- from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
- from mir.config.constants import template
-
- template_data = template["arch"]["transformer"]
-
- for series, compatibility_data in mir_db.database.items():
- if any([template for template in template_data if template in series.split(".")[1]]):
- for compatibility, field_data in compatibility_data.items():
- if code_name == series.split(".")[2]:
- return [series, compatibility]
-
- class_name = MODEL_MAPPING_NAMES.get(code_name, False)
- if not class_name: # second pass without separators
- recoded_mapping = {code.replace("-", "").replace("_", ""): model for code, model in MODEL_MAPPING_NAMES.items()}
- class_name = recoded_mapping.get(code_name, False)
- if not class_name:
- return None
- pkg_data = field_data.get("pkg")
- if pkg_data:
- for _, pkg_type_data in pkg_data.items():
- maybe_class = pkg_type_data.get("transformers")
- if maybe_class == class_name:
- return [series, compatibility]
- return None
-
-
-def slice_number(text: str) -> Union[int, float, str]:
- """Separate a numeral value appended to a string\n
- :return: Converted value as int or float, or unmodified string
- """
- for index, char in enumerate(text): # Traverse forwards
- if char.isdigit():
- numbers = text[index:]
- if "." in numbers:
- return float(numbers)
- try:
- return int(numbers)
- except ValueError:
- return numbers
- return text
diff --git a/mir/generate/.notes.txt b/mir/generate/.notes.txt
new file mode 100644
index 0000000..e133139
--- /dev/null
+++ b/mir/generate/.notes.txt
@@ -0,0 +1,66 @@
+# type: ignore
+# ruff: noqa
+
+tag_model_from_repo
+
+mir_tag_from_config
+import_submodules
+
+
+constants
+tag_scheduler
+read_json_file
+mir_prefix_from_forward_pass
+
+Set Data Format
+Find classes
+get_repo_from_class_map
+check repo/model migration
+
+transformers_index
+ classmapentry
+ + find_transformers_classes
+ +check_migrations
+ get_repo_from_class_map
+ mir_tag_from_config
+ check_migrations
+ import_submodules tokenizers
+
+
+diffusers_index
+ docstringentry
+ find_diffusers_classes
+ check_migrations
+ retrieve_diffusers_docstrings
+ import_submodules module for model class
+ import_submodules model class
+ extract_init_parameters
+ create_pipe_entry
+ extract_init_parameters
+ mir_prefix_from_forward_pass
+ tag_model_from_repo
+ check_migrations
+
+add_mir_dtype
+ + tag_dtype
+ MIRDatabase
+
+add_mir_schedulers
+ tag_scheduler
+
+
+task_analysis
+ import_submodules
+ mapped_cls
+ import_submodules
+ tag_scheduler
+ resolve_code_names
+
+
+# def create_model_tag(model_header,metadata_dict):
+# parse_file = parse_model_header(model_header)
+# reconstructed_file_path = os.path.join(disk_path,each_file)
+# attribute_dict = metadata_dict | {"disk_path": reconstructed_file_path}
+# file_metadata = parse_file | attribute_dict
+# index_tag = create_model_tag(file_metadata)
+#
\ No newline at end of file
diff --git a/mir/config/__init__.py b/mir/generate/__init__.py
similarity index 100%
rename from mir/config/__init__.py
rename to mir/generate/__init__.py
diff --git a/mir/generate/__main__.py b/mir/generate/__main__.py
new file mode 100644
index 0000000..8a1e85b
--- /dev/null
+++ b/mir/generate/__main__.py
@@ -0,0 +1,276 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import os
+from mir.maid import MIRDatabase
+from mir.generate.tasks import TaskAnalyzer
+from typing import Callable
+
+
+def run_task() -> None:
+ main()
+
+
+def pipe(mir_db: MIRDatabase) -> MIRDatabase:
+ import argparse
+ import asyncio
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Infer pipe components from Diffusers library and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-pipe",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ from mir.generate.automata import assimilate
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ tasker = TaskAnalyzer()
+ pipe_tuple = asyncio.run(tasker.detect_pipes(mir_db))
+ assimilate(mir_db, [pipe for pipe in pipe_tuple])
+ mir_db.write_to_disk()
+ return mir_db
+
+
+# if __name__ == "__main__":
+# pipe()
+
+
+def main():
+ # import ordered to prevent file lock
+ import mir.maid
+ from mir.maid import main as mir_main
+
+ mir_main()
+ from mir.generate.tasks import main
+
+ main()
+ from mir.generate.tasks import pipe
+
+ pipe()
+
+ import os
+ import shutil
+
+ try:
+ os.remove("mir.json")
+ except FileNotFoundError:
+ pass
+ shutil.copy2(os.path.join(os.path.dirname(mir.maid.__file__), "mir.json"), os.path.join(os.getcwd(), "mir.json"))
+
+
+if __name__ == "__main__":
+ main()
+
+
+def main(mir_db: MIRDatabase | None = None) -> MIRDatabase:
+ """Parse arguments to feed to dict header reader"""
+ import argparse
+ import asyncio
+ from mir.generate.automata import assimilate
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Scrape the task classes from currently installed libraries and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-tasks",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ tasker = TaskAnalyzer()
+ task_tuple = asyncio.run(tasker.detect_tasks(mir_db))
+
+ assimilate(mir_db, [task for task in task_tuple])
+
+ mir_db.write_to_disk()
+ return mir_db
+
+
+def main(mir_db: Callable | None = None, remake: bool = True) -> None:
+ """Build the database"""
+ from sys import modules as sys_modules
+
+ if __name__ != "__main__" and "pytest" not in sys_modules: #
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
+ usage="mir-maid",
+ epilog="""Does NOT include results of `mir-task` and `mir-pipe`. These commands should be run separately. Output:
+ 2025-08-03 14:22:47 INFO ('Wrote 0 lines to MIR database file.',)
+ 2025-08-03 14:22:47 INFO ('Wrote #### lines to MIR database file.',)""",
+ )
+ parser.add_argument(
+ "-r",
+ "--remake_off",
+ action="store_true",
+ default=False,
+ help="Prevent erasing and remaking the MIR database file (default: False, always start from a completely empty MIR file)",
+ )
+
+ args = parser.parse_args()
+ remake = not args.remake_off
+
+ from mir.generate.automata import (
+ add_mir_audio,
+ add_mir_diffusion,
+ add_mir_dtype,
+ add_mir_llm,
+ add_mir_lora,
+ add_mir_schedulers,
+ add_mir_vae,
+ hf_pkg_to_mir,
+ mir_update,
+ )
+ from mir.json_io import write_json_file
+
+ if remake:
+ os.remove(MIR_PATH_NAMED)
+ folder_path_named = os.path.dirname(MIR_PATH_NAMED)
+ mode = "x"
+ else:
+ mode = "w"
+ write_json_file(folder_path_named, file_name="mir.json", data={"expected": "data"}, mode=mode)
+ mir_db = MIRDatabase()
+ mir_db.database.pop("expected", {})
+ hf_pkg_to_mir(mir_db)
+ add_mir_dtype(mir_db)
+ add_mir_schedulers(mir_db)
+ add_mir_lora(mir_db)
+ add_mir_audio(mir_db)
+ add_mir_diffusion(mir_db)
+ add_mir_llm(mir_db)
+ add_mir_vae(mir_db)
+ mir_db.write_to_disk()
+ mir_db = MIRDatabase()
+ mir_db = MIRDatabase()
+ mir_update(mir_db)
+ mir_db.write_to_disk()
+
+
+if __name__ == "__main__":
+ remake: bool = True
+ tasks = True
+ pipes = True
+
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules: #
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
+ usage="python -m nnll.mir.maid",
+ epilog="""Includes `mir-task` and `mir-pipe` by default. Output:
+ 2025-08-15 19:41:18 INFO ('Wrote 0 lines to MIR database file.',)
+ 2025-08-15 19:38:48 INFO ('Wrote ### lines to MIR database file.',)
+ INFO ('Wrote ### lines to MIR database file.',)
+ INFO ('Wrote ### lines to MIR database file.',)""",
+ )
+ parser.add_argument(
+ "-r",
+ "--remake_off",
+ action="store_true",
+ default=False,
+ help="Don't erase and remake the MIR database (default: False)",
+ )
+ parser.add_argument(
+ "-t",
+ "--tasks_off",
+ action="store_true",
+ default=False,
+ help="Don't append task information to the MIR database (default: False)",
+ )
+ parser.add_argument(
+ "-p",
+ "--pipes_off",
+ action="store_true",
+ default=False,
+ help="Don't append pipeline information to the MIR database (default: False)",
+ )
+
+ args = parser.parse_args()
+ remake = not args.remake_off
+ tasks = not args.tasks_off
+ pipes = not args.pipes_off
+
+ main(remake=remake)
+
+ from mir.generate.tasks import pipe, run_task
+
+ mir_db = run_task()
+ pipe(mir_db)
+
+
+def main(mir_db: MIRDatabase = None):
+ """Parse arguments to feed to dict header reader"""
+ import argparse
+ import asyncio
+ from mir.automata import assimilate
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Scrape the task classes from currently installed libraries and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-tasks",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ auto_pkg = TaskAnalyzer()
+ task_tuple = asyncio.run(auto_pkg.detect_tasks(mir_db))
+
+ assimilate(mir_db, [task for task in task_tuple])
+
+ mir_db.write_to_disk()
+ return mir_db
+
+
+def run_task():
+ main()
+
+
+def pipe(mir_db: MIRDatabase = None):
+ import argparse
+ import asyncio
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Infer pipe components from Diffusers library and attach them to an existing MIR database.\nOffline function.",
+ usage="mir-pipe",
+ epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
+ )
+ parser.parse_args()
+
+ from mir.automata import assimilate
+
+ if not mir_db:
+ mir_db = MIRDatabase()
+
+ auto_pkg = TaskAnalyzer()
+ pipe_tuple = asyncio.run(auto_pkg.detect_pipes(mir_db))
+ assimilate(mir_db, [pipe for pipe in pipe_tuple])
+ mir_db.write_to_disk()
+ return mir_db
+
+
+if __name__ == "__main__":
+ pipe()
diff --git a/mir/generate/_extras.py b/mir/generate/_extras.py
new file mode 100644
index 0000000..c1b0366
--- /dev/null
+++ b/mir/generate/_extras.py
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from typing import Callable, Dict, List, Optional, Union
+
+from mir import NFO
+from mir.generate.from_module import import_object_named, show_path_for
+from mir.generate.tasks import TaskAnalyzer
+
+
+def _class_parent(code_name: str, pkg_name: str) -> Optional[List[str]]:
+ """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
+ ### NOTE: in most cases `__module__` makes this redundant
+ :param code_name: The internal name for the model in the third-party API.
+ :param pkg_name: The API Package
+ :return: A list corresponding to the path of the model, or None if not found
+ :raises KeyError: for invalid pkg_name
+ """
+ import os
+ from importlib import import_module
+
+ pkg_paths = {
+ "diffusers": "pipelines",
+ "transformers": "models",
+ }
+ folder_name = code_name.replace("-", "_")
+ pkg_name = pkg_name.lower()
+ folder_path = pkg_paths[pkg_name]
+ package_obj = import_module(pkg_name)
+ folder_path_named = [folder_path, folder_name]
+ pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
+ # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
+ if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
+ import_path = [pkg_name]
+ import_path.extend(folder_path_named)
+ return import_path
+
+
+def _extract_inherited_classes(model_class: Union[Callable, str], pkg_name: Optional[str] = None) -> Optional[Dict[str, List[str]]]:
+ """Strips tags from module's base classes and extracts inherited class members.\n
+ If `module` is a string, it requires the `library` argument to convert it into a callable.\n
+ :param module: A module or string representing a module.
+ :param library: Library name required if `module` is a string. Defaults to None.
+ :returns: Mapping indices to class path segments, or None if invalid input."""
+
+ if isinstance(model_class, str):
+ if not pkg_name:
+ NFO("Provide a library type argument to process strings")
+ return None
+ model_class = import_object_named(model_class, pkg_name)
+ signature = model_class.__bases__
+ class_names = []
+ for index, class_annotation in enumerate(signature):
+ tag_stripped = str(class_annotation)[8:-2]
+ module_segments = tag_stripped.split(".")
+ class_names.append(module_segments)
+ return class_names
+
+
+def _trace_classes(pipe_class: str, pkg_name: str) -> Dict[str, List[str]]:
+ """Retrieve all compatible pipe forms\n
+ NOTE: Mainly for Diffusers
+ :param pipe_class: Origin pipe
+ :param pkg_name: Dependency package
+ :return: A dictionary of pipelines"""
+
+ related_pipes = []
+ code_name = show_path_for(pipe_class, pkg_name)
+ if pkg_name == "diffusers":
+ related_pipe_class_name = pipe_class
+ else:
+ related_pipe_class_name = None
+ related_pipes: list[str] = TaskAnalyzer.show_diffusers_tasks(code_name=code_name, class_name=related_pipe_class_name)
+ # for i in range(len(auto_tasks)):
+ # auto_tasks.setdefault(i, revealed_tasks[i])
+ parent_folder = class_parent(code_name, pkg_name)
+ if pkg_name == "diffusers":
+ pkg_folder = import_object_named(parent_folder[0], ".".join(parent_folder))
+ else:
+ pkg_folder = import_object_named("__init__", ".".join(parent_folder[:-1]))
+ if hasattr(pkg_folder, "_import_structure"):
+ related_pipes.extend(next(iter(x)) for x in pkg_folder._import_structure.values())
+ related_pipes = set(related_pipes)
+ related_pipes.update(tuple(x) for x in _extract_inherited_classes(model_class=pipe_class, pkg_name=pkg_name))
+ return related_pipes
+
+
+def _show_shared_hyperparameters(parameter_filter: Optional[str] = None) -> List[str]:
+ """Show all config classes in the Transformer package with the specified init annotation\n
+ :param from_match: Narrow the classes to only those with an exact key inside
+ :return: A list of all Classes"""
+ from mir.config.constants import extract_init_parameters
+ from mir.inspect.metadata import find_transformers_classes
+
+ transformers_data = find_transformers_classes()
+ config_data = []
+ for entry in transformers_data:
+ if parameter_filter:
+ segments = extract_init_parameters(module=entry.config, package_name="transformers")
+ if parameter_filter in list(segments):
+ config_data.append(entry.config)
+ else:
+ config_data.append(entry.config)
+ return config_data
+
+
+def _get_class_parent_folder(class_name: str, pkg_name: str) -> List[str]:
+ """Retrieve the folder path within a class. Only returns if it is a valid path in the system (formerly seek_class_path)\n
+ ### NOTE: in most cases `__module__` makes this redundant
+ :param class_name: The internal name for the model in the third-party API.
+ :param pkg_name: The API Package
+ :return: A list corresponding to the path of the model, or None if not found
+ :raises KeyError: for invalid pkg_name
+ """
+ from mir.config.console import dbuq
+ from mir.config.constants import extract_init_parameters
+ from mir.inspect.classes import resolve_code_names
+
+ pkg_name = pkg_name.lower()
+ if pkg_name == "diffusers":
+ parent_folder: List[str] = resolve_code_names(class_name=class_name, pkg_name=pkg_name, path_format=True)
+ if not parent_folder or not parent_folder[-1].strip():
+ dbuq("Data not found for", " class_name = {class_name},pkg_name = {pkg_name},{parent_folder} = parent_folder")
+ return None
+ elif pkg_name == "transformers":
+ print(class_name)
+ module_path = extract_init_parameters(class_name, "transformers")
+ print(module_path)
+ config = str(module_path.get("config"))
+ print(config)
+ config = config.split(": ")[-1].split(".")
+ parent_folder = config[:3]
+ return parent_folder
+
+
+def _class_to_mir_tag(mir_db: Dict[str, str], code_name: str) -> Optional[str]:
+ """Converts a class identifier to its corresponding MIR tag.\n
+ :param mir_db: A dictionary mapping series-compatibility pairs to their respective data.
+ :param code_name: The Transformers class identifier to convert.
+ :return: An optional list containing the series and compatibility if found, otherwise None."""
+
+ from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
+
+ from mir.config.constants import TEMPLATE
+
+ template_data = TEMPLATE["arch"]["transformer"]
+
+ for series, compatibility_data in mir_db.database.items():
+ if any([template for template in template_data if template in series.split(".")[1]]):
+ for compatibility, field_data in compatibility_data.items():
+ if code_name == series.split(".")[2]:
+ return [series, compatibility]
+
+ class_name = MODEL_MAPPING_NAMES.get(code_name, False)
+ if not class_name: # second pass without separators
+ recoded_mapping = {code.replace("-", "").replace("_", ""): model for code, model in MODEL_MAPPING_NAMES.items()}
+ class_name = recoded_mapping.get(code_name, False)
+ if not class_name:
+ return None
+ pkg_data = field_data.get("pkg")
+ if pkg_data:
+ for _, pkg_type_data in pkg_data.items():
+ maybe_class = pkg_type_data.get("transformers")
+ if maybe_class == class_name:
+ return [series, compatibility]
+ return None
+
+
+def tag_transformers_model(repo_path: str, class_name: str, addendum: dict | None = None) -> tuple[str, str, str | dict[str, dict]]:
+ """Convert model repo paths to MIR tags, classifying by feature\n
+ :param name: Repo path
+ :param class_name: The HF transformers class for the model
+ :return: A segmented MIR tag useful for appending index entries"""
+
+ from mir.config.constants import extract_init_parameters
+
+ annotations = extract_init_parameters(class_name.replace("Model", "Config"), "transformers")
+ if not annotations:
+ class_name = class_name.replace("Config", "Model")
+ annotations = extract_init_parameters(class_name, "transformers")
+ if not annotations:
+ raise TypeError("No mode type returned")
+ if "Bert" in class_name:
+ print(annotations)
+ mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
+ base_series, base_comp = tag_model_from_repo(repo_path)
+ if not addendum:
+ return mir_prefix, base_series, base_comp
+ else:
+ mir_prefix = f"info.{mir_prefix}"
+ return mir_prefix, base_series, {base_comp: addendum}
diff --git a/mir/automata.py b/mir/generate/automata.py
similarity index 54%
rename from mir/automata.py
rename to mir/generate/automata.py
index 227f4ab..da548b5 100644
--- a/mir/automata.py
+++ b/mir/generate/automata.py
@@ -9,13 +9,10 @@
from importlib import import_module
import re
-from typing import Dict, List, Tuple
+from typing import Dict, List, Tuple, Any
-from diffusers import _import_structure
import torch
-from mir.config.console import dbuq, nfo
-from mir.config.conversion import slice_number
from mir.indexers import diffusers_index, transformers_index
from mir.maid import MIRDatabase
from mir.spec import mir_entry
@@ -30,88 +27,8 @@
vega_series, vega_comp = tag_model_from_repo("segmind/Segmind-Vega")
sd3_series, sd3_comp = tag_model_from_repo("stable-diffusion-3.5-medium") #
-# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
-# from diffusers.models.attention_processor import AttentionProcessor
-# mir_data
-# for series, comp_name in mir_data.items():
-# id_segment = series.split(".")
-# for compatibility in comp_name:
-# dbug(id_segment)
-# try:
-# mir_db.add(
-# mir_entry(
-# domain=id_segment[0],
-# arch=id_segment[1],
-# series=id_segment[2],
-# comp=compatibility,
-# **mir_data[series][compatibility],
-# ),
-# )
-# except IndexError as error_log:
-# nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
-# dbug(error_log)
-
-
-# def gen_guiders(mir_db: MIRDatabase): # upstream not quite ready for this yet
-# from nnll.metadata.helpers import snake_caseify
-# from diffusers.guider import GuiderType
-
-# guider_type = GuiderType
-# for comp_name in guider_type.items():
-# class_obj = comp_name.__name__
-# mir_data = {"pkg": {0: {"diffusers": class_obj}}}
-# try:
-# mir_db.add(
-# mir_entry(
-# domain="ops",
-# arch="noise_prediction",
-# series="guider",
-# comp=snake_caseify(class_obj),
-# **mir_data,
-# ),
-# )
-# except IndexError as error_log:
-# nfo(f"Failed to create compatibility: {class_obj}")
-# dbug(error_log)
-
-
-# (
-# "info.unet",
-# "stable-cascade",
-# {
-# "combined": {
-# "pkg": {
-# 0: { # decoder=decoder_unet
-# "precision": "ops.precision.bfloat.B16",
-# "generation": {
-# "negative_prompt": "",
-# "num_inference_steps": 20,
-# "guidance_scale": 4.0,
-# "num_images_per_prompt": 1,
-# "width": 1024,
-# "height": 1024,
-# },
-# },
-# "pkg_alt": {
-# 0: {
-# "diffusers": {
-# "StableCascadeCombinedPipeline": {
-# "negative_prompt": "",
-# "num_inference_steps": 10,
-# "prior_num_inference_steps": 20,
-# "prior_guidance_scale": 3.0,
-# }
-# },
-# }
-# },
-# }
-# }
-# },
-# ),
-
-
-def assimilate(mir_db: MIRDatabase, data_tuple: List[Tuple[Dict[str, any]]]) -> None:
+def assimilate(mir_db: MIRDatabase, data_tuple: List[Tuple[Dict[str, Any]]]) -> None:
"""Merge new data into a pre-generated MIR database, updating while preserving existing data structures.\n
:param mir_db: The MIRDatabase instance
:param data_tuple: A list of tuples, each containing:\n
@@ -153,105 +70,6 @@ def update_nested_dict(target, source):
update_nested_dict(mir_data[comp][field][definition], sub_def_data)
-def hf_pkg_to_mir(mir_db: MIRDatabase):
- """Generate MIR HF Hub model database"""
- mir_data = diffusers_index() | transformers_index()
- for series, comp_name in mir_data.items():
- id_segment = series.split(".")
- for compatibility in comp_name:
- # dbug(id_segment)
- try:
- mir_db.add(
- mir_entry(
- domain=id_segment[0],
- arch=id_segment[1],
- series=id_segment[2],
- comp=compatibility,
- **mir_data[series][compatibility],
- ),
- )
- except IndexError: # as error_log:
- nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
- # dbug(error_log)
-
-
-def add_mir_dtype(mir_db: MIRDatabase):
- """Create mir info database"""
-
- available_dtypes: List[str] = [dtype for dtype in torch.__dict__.values() if isinstance(dtype, torch.dtype)]
- series_name = "_"
- for precision in available_dtypes:
- dep_name, class_name = str(precision).split(".")
- if "_" in class_name:
- comp_name = class_name[0].upper() + "8_" + class_name.split("_")[1].upper()
- if comp_name.endswith("FN"):
- comp_name = comp_name[:-2]
- else:
- comp_name = class_name[0].upper() + str(slice_number(class_name))
- variant_name = class_name.replace("bfloat", "bf").replace("float", "fp")
- dbuq(variant_name)
- patterns = [r"complex", r"bits", r"quint", r"uint", r"int", r"bfloat", r"float", r"bool"]
- for precision_name in patterns:
- compiled = re.compile(precision_name)
- dtype = re.search(compiled, class_name)
- if dtype:
- series_name = dtype.group()
- break
-
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="precision",
- series=series_name,
- comp=comp_name,
- pkg={0: {dep_name.lower(): {class_name.lower(): {"variant": variant_name}}}},
- )
- )
-
-
-def add_mir_schedulers(mir_db: MIRDatabase):
- """Create mir info database"""
-
- for class_name in _import_structure["schedulers"]:
- if class_name != "SchedulerMixin":
- series_name, comp_name = tag_scheduler(class_name)
- class_obj = import_module("diffusers.schedulers")
- class_path = getattr(class_obj, class_name).__module__
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="scheduler",
- series=series_name,
- comp=comp_name.lower(),
- pkg={
- 0: {
- "diffusers": class_name,
- "module_path": class_path,
- },
- },
- )
- )
-
- class_name = "KarrasDiffusionSchedulers"
- series_name, comp_name = tag_scheduler(class_name)
- class_obj = import_module("diffusers.schedulers.scheduling_utils")
- class_path = getattr(class_obj, class_name).__module__
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="scheduler",
- series=series_name,
- comp=comp_name,
- pkg={
- 0: {
- "diffusers": class_name,
- "module_path": class_path,
- },
- },
- ),
- )
-
-
# def auto_gan etc etc
# ai-forever/Real-ESRGAN
@@ -259,1192 +77,6 @@ def add_mir_schedulers(mir_db: MIRDatabase):
def mir_update(mir_db: MIRDatabase, task_list: list = None, pipe_list: list = None):
"""Create mir unet info database"""
- diffusers_addons = [
- (
- "stabilityai/stable-diffusion-xl-base-1.0",
- "StableDiffusionXLPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {
- "denoising_end": 0.8,
- "num_inference_steps": 40,
- "output_type": "latent",
- "safety_checker": False,
- "width": 1024,
- "height": 1024,
- },
- },
- 1: {"diffusers": "DiffusionPipeline"},
- },
- "file_256": [
- "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a", # diffusers
- "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139", # fp16 diffusers
- "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b", # modelspec sai
- "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf", # onnx
- ],
- "layer_256": [
- "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef", # any modelspec sai
- "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b", # diffusers
- "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da", # diffusers fp16
- ],
- "layer_b3": [
- "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1", # any modelspec sai
- "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3", # diffusers
- "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b", # diffusers fp16
- ],
- "identifiers": ["logit_scale", "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight", "add_embedding.linear_2.bias"],
- },
- ),
- (
- "stabilityai/stable-diffusion-xl-refiner-1.0",
- "StableDiffusionXLImg2ImgPipeline",
- {
- "pkg": {
- 1: {
- "diffusers": "DiffusionPipeline",
- "generation": {"num_inference_steps": 40, "denoising_end": 0.8},
- }
- },
- "identifiers": ["conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"],
- "file_256": [
- "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f", # diffusers
- "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f", # modelspec sai
- "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe", # fp16 diffusers
- ],
- "layer_b3": [
- "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
- "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
- "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683",
- ],
- "layer_256": [
- "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
- "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
- "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6",
- ],
- },
- ),
- (
- "lodestones/Chroma",
- "ChromaPipeline",
- {
- "pkg": {
- 1: {
- "generation": {"neg_text": "", "num_steps": "28", "latent_size": [64, 64]},
- }
- },
- "file_256": [
- "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de", # v1
- "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036", # v46,
- "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6", # v37 detail
- "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8", # mlx chroma / v36 detail
- ],
- "layer_b3": [
- "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479",
- ],
- "layer_256": ["a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"],
- },
- ),
- (
- "fal/AuraFlow",
- "AuraFlowPipeline",
- {
- "identifiers": [[8192, 3072], "mlpX.c_fc2.weight", "joint_transformer_blocks.2.ff_context.linear_2.weight"],
- "file_256": [
- "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
- "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b", # sai
- "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203", # diffusers fp16
- "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb", # diffusers
- ],
- "layer_b3": [
- "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
- "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a", # sai
- "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
- "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d",
- ],
- "layer_256": [
- "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
- "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
- "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
- "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5",
- ],
- },
- ),
- (
- "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers",
- "HunyuanDiTPipeline",
- {
- "identifiers": ["extra_embedder", "model.blocks", "skip_norm.weight"],
- "file_256": [
- "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787", # normal
- "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800", # distilled
- "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d", # 1.2
- ],
- "layer_b3": [
- "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
- "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120", # distilled
- "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700",
- ],
- "layer_256": [
- "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
- "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e", # distilled
- "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c", # 1.2
- ],
- },
- ),
- (
- "Alpha-VLLM/Lumina-Next-SFT-diffusers",
- "LuminaPipeline",
- {
- "pkg": {
- 0: {
- "precision": " ops.precision.bfloat.B16",
- },
- },
- "identifiers": ["time_caption", "feed_forward"],
- "file_256": [
- "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd",
- ],
- "layer_b3": [
- "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
- "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839",
- ],
- "layer_256": [
- "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
- "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0",
- ],
- },
- ),
- (
- "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
- "PixArtSigmaPipeline",
- {
- "identifiers": ["adaln_single", "scale_shift_table"],
- "file_256": [
- "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
- "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239",
- ],
- "layer_b3": [
- "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
- "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3",
- ],
- "layer_256": [
- "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
- "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c",
- ],
- },
- ),
- (
- "PixArt-alpha/PixArt-XL-2-1024-MS",
- "PixArtAlphaPipeline",
- {
- "identifiers": ["aspect_ratio", "y_embedding", "emb.resolution", "caption_projection"],
- "file_256": ["809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"],
- "layer_b3": ["c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"],
- "layer_256": ["117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"],
- },
- ),
- (
- "stabilityai/stable-diffusion-3.5-medium",
- "StableDiffusion3Pipeline",
- {
- "pkg": {
- 0: {"precision": "ops.precision.float.F16"},
- },
- "identifiers": [
- "model.diffusion_model.joint_blocks.",
- "transformer_blocks.21.norm1_context.linear.weight",
- "transformer_blocks.31.norm1_context.linear.weight",
- "blocks.11.ff.net.2.weight",
- ],
- "file_256": [
- "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93", # large
- "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de", # medium
- ],
- "layer_b3": [
- "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
- "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746", # med
- ],
- "layer_256": [
- "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
- "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463", # med
- ],
- },
- ),
- (
- "Efficient-Large-Model/Sana-1600M-1024px-BF16-diffusers",
- "SanaPipeline",
- {
- "pkg": {
- 0: {
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 4.5,
- "num_inference_steps": 20,
- },
- "precision": "ops.precision.bfloat.B16",
- },
- },
- "file_256": [
- "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe",
- ],
- "layer_b3": [
- "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd",
- ],
- "layer_256": [
- "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e",
- ],
- },
- ),
- (
- "stable-diffusion-v1-5/stable-diffusion-v1-5",
- "StableDiffusionPipeline",
- {
- "identifiers": ["up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"],
- "file_256": [
- "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa", # pruned ema only original safetensors
- "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6", # pruned original safetensors
- "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053", # ema pruned original ckpt
- "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516", # pruned ema original ckpt
- "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1", # diffusers safetensors
- "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3", # diffusers non ema safetensors
- "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21", # diffusers fp16
- ],
- "layer_b3": [
- "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa", # ckpt
- "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd", # safetensors ema pruned
- "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a", # safetensors pruned
- "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00", # diffusers
- "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c", # diffusers fp16
- ],
- "layer_256": [
- "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
- "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54", # st
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91", # ckpt
- "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb", # st fp16
- ],
- },
- ),
- (
- "stabilityai/stable-cascade-prior",
- "StableCascadePriorPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "negative_prompt": "",
- "num_images_per_prompt": 1,
- "num_inference_steps": 20,
- "guidance_scale": 4.0,
- "width": 1024,
- "height": 1024,
- },
- }
- },
- "file_256": [
- "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866", # lite bf16
- "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf", # pretrained
- "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0", # stage c modelspec sai
- "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07", # stage c
- "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633", # stage c lite
- "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099", # bf16
- ],
- "layer_b3": [
- "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
- "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
- "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
- "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
- "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
- "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f",
- ],
- "layer_256": [
- "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
- "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
- "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
- "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
- "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
- "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb",
- ],
- "identifiers": ["down_blocks.0.2.kv_mapper", "previewer", "backbone"],
- },
- ),
- (
- "black-forest-labs/FLUX.1-dev",
- "FluxPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 3.5,
- "num_inference_steps": 50,
- "max_sequence_length": 512,
- },
- },
- 1: {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "gudance": 3.5,
- "num_inference_steps": 25,
- },
- },
- },
- "file_256": [
- "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2", # krea sai
- "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad", # krea diffusers
- "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7", # modelspec sai
- "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8", # diffusers
- "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d", # qwen2
- ],
- "layer_b3": [
- "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
- "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e", # modelspec sai
- "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb", # diffusers
- ],
- "layer_256": [
- "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
- "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1", # diffusers
- "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881", # modelspec sai
- ],
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight",
- ],
- },
- ),
- (
- "black-forest-labs/FLUX.1-schnell",
- "FluxPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 0.0,
- "num_inference_steps": 4,
- "max_sequence_length": 256,
- },
- },
- 1: {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "num_inference_steps": 4,
- },
- },
- },
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight",
- ],
- "file_256": [
- "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a", # sai modelspec
- "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820", # diffusers
- ],
- "layer_b3": [
- "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
- "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c", # sai modelspec
- "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790", # diffusers
- ],
- "layer_256": [
- "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
- "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed", # sai modelspec
- "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649", # diffusers
- ],
- },
- ),
- (
- "stabilityai/stable-cascade",
- "StableCascadeDecoderPipeline",
- {
- "pkg": { # prior=prior_unet
- 0: {
- "generation": { # image_embeddings=prior_output.image_embeddings,
- "negative_prompt": "",
- "guidance_scale": 0.0,
- "output_type": "pil",
- "num_inference_steps": 10,
- },
- "precision": "ops.precision.bfloat.B16",
- },
- },
- "file_256": [
- "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376", # lite bf16
- "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a", # bf16
- "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05", # b modelspec sai
- "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf", # lite
- "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43", # b
- ],
- "layer_b3": [
- "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
- "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
- "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
- "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
- "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96",
- ],
- "layer_256": [
- "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
- "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
- "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
- "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
- "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733",
- ],
- "identifiers": ["0.2.channelwise", "clip_mapper.bias", ".12.self_attn.k_proj.weight"],
- },
- ),
- (
- "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
- "WanImageToVideoPipeline",
- {
- "file_256": [
- "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13", # 480 diffusers
- "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa", # 720 fp8 scaled sai
- "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8", # 720 fp8 sai
- "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c", # 720 fp16 sai
- "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9", # 720 bf16 sai
- "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395", # 480 fp8e4m scaled sai
- "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a", # 480 fp8 scaled
- "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9", # 480 bf16 sai
- "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253", # 480 fp16 sai
- ],
- "layer_b3": [
- "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
- "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175", # fp8 scaled
- "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce", # fp8
- "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d", # fp16
- "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6", # bf16
- ],
- "layer_256": [
- "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
- "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244", # fp8 scaled
- "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
- "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
- "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5",
- ],
- },
- ),
- (
- "Qwen/Qwen-Image",
- "QwenImagePipeline",
- {
- "file_256": [
- "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd",
- ],
- "layer_b3": [
- "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150", # diffusers
- ],
- "layer_256": [
- "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9", # diffusers
- ],
- },
- ),
- (
- "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
- "WanVACEPipeline",
- {
- "file_256": [
- "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99", # diffusers 14
- "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a", # diffusers 1.3
- "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307", # 14 fp16 sai
- "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a", # 1.3 preview fp16 sai
- "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2", # 1.3 fp16 sai
- ],
- "layer_b3": [
- "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032", # diffusers 14
- "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1", # diffusers 1.3
- "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377", # 14 fp16 sai
- "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac", # 1.3 fp16 sai
- ],
- "layer_256": [
- "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269", # diffusers 14
- "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026", # diffusers 1.3
- "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446", # 14 fp16 sai
- "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0", # 1.3 fp16 sai
- ],
- },
- ),
- (
- "Wan-AI/Wan2.1-T2V-14B-Diffusers",
- "WanPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0,
- },
- },
- },
- "file_256": [
- "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179", # diffusers
- "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc", # diffusers 2
- "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5", # fp16 sai 1.3
- "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd", # bf16 sai 1.3
- "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce", # fp8 scaled sai
- "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb", # fp8 sai
- "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a", # fp16 sai
- "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8", # bf16 sai
- ],
- "layer_b3": [
- "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48", # diff
- "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8", # fp16 sai 1.3
- "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb", # bf16 sai
- "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8", # fp8 scaled sai
- "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579", # fp8 sai
- "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092", # fp16 sai
- "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66",
- ],
- "layer_256": [
- "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
- "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
- "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
- "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
- "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
- "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
- "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d",
- ],
- },
- ),
- (
- "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
- "WanVideoToVideoPipeline",
- {
- "pkg": {
- 0: {
- "diffusers": "WanPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {"height": 480, "width": 832, "num_frames": 81, "guidance_scale": 5.0},
- },
- }
- },
- ),
- (
- "nvidia/cosmos-predict2-text2image",
- "Cosmos2TextToImagePipeline",
- {
- "file_256": [
- "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec", # gguf
- "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
- "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
- "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
- "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
- "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
- "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
- "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
- "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
- "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
- "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
- "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
- "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c",
- ],
- "layer_b3": [
- "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b", # gguf
- "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
- "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
- "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
- "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
- "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
- "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
- "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
- "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
- "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
- "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
- "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
- "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d",
- ],
- "layer_256": [
- "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180", # gguf
- "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
- "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
- "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
- "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
- "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
- "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
- "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
- "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
- "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
- "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
- "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
- "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5",
- ],
- },
- ),
- (
- "rhymes-ai/Allegro",
- "AllegroPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "guidance_scale": 7.5,
- "max_sequence_length": 512,
- "num_inference_steps": 100,
- },
- },
- },
- "file_256": ["6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"],
- "layer_b3": ["8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"],
- "layer_256": ["9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"],
- },
- ),
- (
- "audioldm-s-v2",
- "AudioLDMPipeline",
- {
- "file_256": ["fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"],
- "layer_b3": ["82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"],
- "layer_256": ["d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"],
- },
- ),
- (
- "zai-org/CogVideoX-2b",
- "CogVideoXPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {"num_videos_per_prompt": 1, "num_inference_steps": 50, "num_frames": 49, "guidance_scale": 6},
- }
- },
- "file_256": ["8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"],
- "layer_b3": ["1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"],
- "layer_256": ["edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"],
- },
- ),
- (
- "HiDream-ai/HiDream-I1-Full",
- "StableDiffusion3Pipeline",
- {
- "file_256": ["3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"],
- "layer_b3": ["612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"],
- "layer_256": ["78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"],
- },
- ),
- (
- "cvssp/audioldm2",
- "AudioLDM2Pipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {"num_inference_steps": 200, "audio_length_in_s": 10.0},
- },
- },
- "file_256": ["359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"],
- "layer_b3": ["eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"],
- "layer_256": ["ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"],
- },
- ),
- (
- "Alpha-VLLM/Lumina-Image-2.0",
- "Lumina2Pipeline",
- {
- "pkg": {},
- "file_256": [
- "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
- "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d", # pth single file
- ],
- "layer_b3": [
- "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- ],
- "layer_256": [
- "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
- ],
- },
- ),
- (
- "ucsd-reach/musicldm",
- "MusicLDMPipeline",
- {
- "pkg": {
- 0: {
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0,
- },
- }
- },
- "file_256": [
- "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f", #
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a" #
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8", #
- ],
- },
- ),
- (
- "openai/shap-e",
- "ShapEPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {"num_inference_steps": 64, "size": 256, "guidance_scale": 15},
- }
- },
- },
- ),
- (
- "hunyuanvideo-community/HunyuanVideo",
- "HunyuanVideoPipeline",
- {
- "file_256": [
- "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7", #
- ],
- "layer_b3": [
- "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5" #
- ],
- "layer_256": [
- "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4", #
- ],
- },
- ),
- (
- "zai-org/CogView3-Plus-3B",
- "CogView3PlusPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {
- "guidance_scale": 7.0,
- "num_images_per_prompt": 1,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024,
- },
- },
- },
- },
- ),
- (
- "stabilityai/stable-audio-open-1.0",
- "StableAudioPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_end_in_s": 10,
- "num_waveforms_per_prompt": 3,
- },
- }
- }
- },
- ),
- (
- "Kwai-Kolors/Kolors-diffusers",
- "KolorsPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 5.0,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024,
- },
- },
- 1: {"diffusers": "DiffusionPipeline"},
- },
- "file_256": [
- "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c", # fp16,
- ],
- "layer_b3": [
- "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414", # fp16
- ],
- "layer_256": [
- "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd", # fp16
- ],
- "identifiers": [".DenseReluDense.wi.weight", "encoder_hid_proj.weight"],
- },
- ),
- (
- "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
- "HunyuanDiTPipeline",
- {
- "pkg": {
- 0: {
- "precision": "ops.precision.float.F16",
- }
- },
- "file_256": ["7d31ac8fa389ff39dd0a81430010e52c43b59f15adc00c83625a47881e16830e"],
- "layer_b3": ["bccd37ecc9f85d132b46d0bb67b4facb49fc6c091428a4feba9ab9a93140f5fe"],
- "layer_256": ["ed25d241d58ca298d28abd5919e70341ad194e77dce4859436b52ea4d8fcb616"],
- },
- ),
- ]
-
- transformers_addons = [
- (
- "google-t5/t5-small",
- "T5Model",
- {
- "identifiers": [
- [4096],
- "encoder.embed_tokens.weight",
- "text_encoders.t5xxl.transformer.shared.weight",
- "t5xxl",
- "encoder.block.0.layer.1.DenseReluDense.wi.weight", # small\
- ],
- "file_256": [
- "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e", # shuttle/flux diffusers# flux dev
- "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b", # bf16 modelspec sai
- "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635", # fp16 modelspec sai
- "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d", # fp16 diffusers cogvideox
- "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2", # flux diffusers
- "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09", # fp8_e4m3fn
- "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f", # qfp8_e4m34n
- "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5", # mystic mlx
- "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0", # mlx flex2
- "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8", # mlx jaguar q8
- "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda", # mlx shuttle 3 q4
- "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9", # mlx flex1 q8
- "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17", # DeepFloyd/t5-v1_1-xxl sft
- "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08", # pixart a
- "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae", # Q6K
- "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c", # Q3 KS
- "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4", # Q4 KM
- "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae", # q5 k m gguf
- "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825", # q8 0
- "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b", # q3 kl
- "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e", # q5ks
- "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c", # q4ks
- "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d", # f16 gguf
- "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0", # q3km
- "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3", # f32gguf
- "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686", # allegro
- "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94", # hyd 1.1
- ],
- "layer_b3": [
- "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18", # bf16
- "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001", # fp16 sd35
- "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99", # fp16 sd35 diffusers
- "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae", # shuttle 3 aesthetic diffusers
- "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7", # diffusers
- "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36", # fp8 e4m3fn
- "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4", # fp8 e4m3fn sd35
- "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e", # mlx t5 jaguar
- "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340", # mlx shuttle t5 q8
- "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643", # mlx jaguar q8
- "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced", # mlx flex2 preview
- "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef", # mlx shuttle3 diff q4
- "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0", # mlx flex1 alpha q8
- "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0", # df safetensors
- "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65", # pixart a
- "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0", # flux
- "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402", # q5 k m gguf
- "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88", # ggufs
- "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
- "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
- "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
- "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
- "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
- "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
- "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
- "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
- "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c", # ggufs
- "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e", # allegro
- "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a", # cogvideox
- "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74", # hyd 1.1
- ],
- "layer_256": [
- "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
- "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
- "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
- "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
- "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
- "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
- "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
- "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
- "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
- "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
- "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
- "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
- "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874", # safetensors
- "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
- "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
- "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
- "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b", # GGUF
- "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
- "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
- "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
- "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
- "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
- "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
- "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
- "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
- "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
- "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439", # cogvideox
- "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803", # hyd 1.1
- ],
- },
- ),
- (
- "google/umt5-small",
- "UMT5Model",
- {
- "identifiers": ["encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"],
- "file_256": [
- "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58", # wan t2i diffusers
- "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72", # fp16
- "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6", # auraflow
- "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99", # wan
- "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af", # wan i2ixxl sai fp16
- "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68", # wan i2i xxl sai fp8_e4m3fn scaled sai
- "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48", # gguf
- "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
- "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
- "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
- "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
- "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
- "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
- "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
- "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
- "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce",
- ],
- "layer_b3": [
- "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de", # wan t2i diff
- "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102", # fp16
- "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
- "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5", # wan
- "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
- "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
- "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915", # gguf
- "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
- "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
- "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
- "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
- "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
- "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
- "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
- "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
- "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
- "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
- "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148",
- ],
- "layer_256": [
- "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3", # wan i2i diff
- "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816", # fp16
- "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
- "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5", # wan
- "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
- "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
- "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
- "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17", # gguf
- "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
- "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
- "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
- "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
- "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
- "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
- "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
- "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
- "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
- "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d",
- ],
- },
- ),
- (
- "google/gemma2-9b",
- "Gemma2Model",
- {
- "file_256": [
- "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71", # sana bf16
- "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc", # lumina 2
- ],
- "layer_b3": [
- "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
- "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99",
- ],
- "layer_256": [
- "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
- "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6",
- ],
- },
- ),
- (
- "google/gemma-7b",
- "GemmaModel",
- {
- "file_256": ["01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"], # lumina next sft d
- "layer_b3": [
- "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
- "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981",
- ],
- "layer_256": [
- "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
- "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa",
- ],
- },
- ),
- (
- "google/mt5-small",
- "MT5Model",
- {
- "identifiers": [[250112, 2048], "text_encoders.mt5xl.transformer.shared.weight"],
- "file_256": [
- "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1", # fp16
- "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc", # fp16 enc
- ],
- "layer_b3": [
- "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51",
- # "bc71d4259f4feaa0fb27c1f288765004840f39247cddc98b3ac37329ff1354d0", # fp16 enc
- ],
- "layer_256": [
- "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
- "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61", # fp16 enc
- ],
- },
- ),
- (
- "Qwen/Qwen3-15B-A2B",
- "Qwen3MoeModel",
- {
- "file_256": [
- "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae", # qwen3 coder 53 a35
- ],
- "layer_b3": [
- "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508" # qwen3 coder 53 a35
- ],
- "layer_256": [
- "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476", # qwen3 coder 53 a35
- ],
- },
- ),
- (
- "Qwen/Qwen2-VL-7B-Instruct",
- "Qwen2VLTextModel",
- {
- "file_256": [
- "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78", # orsta 32
- "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a", # orsta 7
- "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4", # qwen img
- "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5", # qwen flux
- ],
- "layer_b3": [
- "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40", # orsta 32
- "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775", # orsta 7
- "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31", # qwen img
- "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406", # qwen flux
- ],
- "layer_256": [
- "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d", # orsta 32
- "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b", # orsta 7
- "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca", # qwen img
- "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7", # qwen flux
- ],
- },
- ),
- (
- "openai/gpt-oss-120b",
- "GptOssModel",
- {
- "file_256": [
- "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3", # original model
- "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a", # diffusers
- ],
- "layer_b3": [
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa", # gguf
- "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913", # "original model"
- "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac", # diffusers
- ],
- "layer_256": [
- "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363", # "original model"
- "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c", # diffusers
- ],
- },
- ),
- (
- "microsoft/Phi-4-multimodal-instruct",
- "Phi4MultimodalModel",
- {
- "file_256": [
- "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6", # mini
- ],
- "layer_b3": [
- "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc", # mini
- ],
- "layer_256": [
- "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b", # mini
- ],
- },
- ),
- (
- "laion/clap-htsat-fused",
- "ClapModel",
- {
- "file_256": [
- "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0", # audio ldm
- "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d", # music ldm
- ],
- "layer_b3": [
- "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
- "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573", # music ldm
- ],
- "layer_256": [
- "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
- "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771", # musicldm
- ],
- },
- ),
- (
- "google-bert/bert-base-uncased",
- "BertModel",
- {
- "file_256": [
- "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914", # hunyuandit
- ],
- "layer_b3": [
- "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12",
- ],
- "layer_256": [
- "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971",
- ],
- },
- ),
- (
- "llava-hf/llava-9b",
- "LlavaModel",
- {
- "file_256": [
- "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741", # hunyuanvideo
- ],
- "layer_b3": [
- "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef",
- ],
- "layer_256": [
- "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73",
- ],
- },
- ),
- ]
-
additional_tags = [tag_pipe(*entry) for entry in diffusers_addons]
additional_tags.extend([tag_base_model(*entry) for entry in transformers_addons])
diff --git a/mir/generate/diffusers/__init__.py b/mir/generate/diffusers/__init__.py
new file mode 100644
index 0000000..2f50daa
--- /dev/null
+++ b/mir/generate/diffusers/__init__.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from dataclasses import dataclass
+from typing import Callable
+from diffusers.pipelines import _import_structure as IMPORT_STRUCTURE
+from diffusers.pipelines.auto_pipeline import SUPPORTED_TASKS_MAPPINGS, _get_task_class as GET_TASK_CLASS
+
+
@dataclass
class DocStringEntry:
    """Represents a structured entry of package name, file name, and docstring.

    Produced by the docstring harvester for each diffusers pipeline module
    that defines an ``EXAMPLE_DOC_STRING`` attribute.
    """

    package_name: str  # package name under ``diffusers.pipelines``
    doc_string: str  # the module's EXAMPLE_DOC_STRING text
    file_name: str  # module file name within the package (no ``.py`` suffix)
    pipe_module: Callable  # NOTE(review): annotated Callable but holds the imported module object — confirm intended type
+
+
+class DocParseData:
+ pipe_class: str
+ pipe_repo: str
+ staged_class: str | None = None
+ staged_repo: str | None = None
+
+ def __init__(self, pipe_class: str, pipe_repo: str, staged_class: str | None = None, staged_repo: str | None = None):
+ self.pipe_class = pipe_class
+ self.pipe_repo = pipe_repo
+ self.staged_class = staged_class
+ self.staged_repo = staged_repo
diff --git a/mir/generate/diffusers/attention.py b/mir/generate/diffusers/attention.py
new file mode 100644
index 0000000..00df941
--- /dev/null
+++ b/mir/generate/diffusers/attention.py
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
+# from diffusers.models.attention_processor import AttentionProcessor
+
+# mir_data
+# for series, comp_name in mir_data.items():
+# id_segment = series.split(".")
+# for compatibility in comp_name:
+# dbug(id_segment)
+# try:
+# mir_db.add(
+# mir_entry(
+# domain=id_segment[0],
+# arch=id_segment[1],
+# series=id_segment[2],
+# comp=compatibility,
+# **mir_data[series][compatibility],
+# ),
+# )
+# except IndexError as error_log:
+# nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
+# dbug(error_log)
+
diff --git a/mir/doc_parser.py b/mir/generate/diffusers/doc_parse.py
similarity index 83%
rename from mir/doc_parser.py
rename to mir/generate/diffusers/doc_parse.py
index 505149c..67c3103 100644
--- a/mir/doc_parser.py
+++ b/mir/generate/diffusers/doc_parse.py
@@ -4,8 +4,9 @@
from typing import List, Optional, Tuple
from pydantic import BaseModel, field_validator
-from mir.config.console import nfo
-from mir.config.constants import DocParseData, DocStringParserConstants
+from mir import NFO
+from mir.generate.diffusers import DocParseData
+from mir.data import PREFIXES
class DocStringValidator:
@@ -35,7 +36,7 @@ def validate_repo_path(repo_path: Optional[str], segment: str) -> Optional[str]:
:returns: Validated repo path or None if invalid
"""
if not DocStringValidator.is_valid_repo_path(repo_path):
- nfo(f"Warning: Unable to resolve repo path for {segment}")
+ NFO(f"Warning: Unable to resolve repo path for {segment}")
return None
return repo_path
@@ -57,34 +58,34 @@ def normalize_doc(cls, docs: str) -> str:
def doc_match(self, prefix_set: List[str] | None = None):
if prefix_set is None:
- prefix_set = DocStringParserConstants.pipe_prefixes
+ prefix_set = PREFIXES["pipe_prefixes"]
candidate = None
staged = None
for prefix in prefix_set:
candidate = self.doc_string.partition(prefix)[2]
prior_candidate = self.doc_string.partition(prefix)[0]
if candidate:
- staged = candidate if any(call_type in candidate for call_type in DocStringParserConstants.staged_call_types) else None
+ staged = candidate if any(call_type in candidate for call_type in PREFIXES["staged_call_types"]) else None
break
return candidate, prior_candidate, staged
- def parse(self) -> DocParseData:
- candidate, prior_candidate, staged = self.doc_match(DocStringParserConstants.pipe_prefixes)
+ def parse(self) -> DocParseData | None:
+ candidate, prior_candidate, staged = self.doc_match(PREFIXES["pipe_prefixes"])
if candidate:
pipe_class, pipe_repo = self._extract_class_and_repo(
segment=candidate,
- call_types=DocStringParserConstants.call_types,
+ call_types=PREFIXES["call_types"],
prior_text=prior_candidate,
)
motion_adapter = "motion_adapter" in candidate or "adapter" in candidate
if motion_adapter and pipe_repo:
- staged, prior_candidate, _ = self.doc_match(DocStringParserConstants.pipe_prefixes[2:]) # skip the adapter statements
+ staged, prior_candidate, _ = self.doc_match(PREFIXES["pipe_prefixes"][2:]) # skip the adapter statements
staged_class, staged_repo = (
self._extract_class_and_repo(
segment=staged,
- call_types=DocStringParserConstants.staged_call_types if not motion_adapter else DocStringParserConstants.call_types,
+ call_types=PREFIXES["staged_call_types"] if not motion_adapter else PREFIXES["call_types"],
prior_text=prior_candidate,
prior_class=pipe_class,
)
@@ -119,17 +120,17 @@ def _extract_class_and_repo(
repo_segment = segment.partition(call_type)[2].partition(")")[0]
pipe_repo = repo_segment.replace("...", "").partition('",')[0].strip('" ')
if not DocStringValidator.is_valid_repo_path(pipe_repo):
- for reference in DocStringParserConstants.repo_variables:
+ for reference in PREFIXES["repo_variables"]:
if reference in segment:
pipe_repo = self._resolve_variable(reference, prior_text)
- break # Not empty!! 確保解析後的路徑不為空!!
+ break # Not empty!! 确保解析的路径不是空的!!
pipe_repo = DocStringValidator.validate_repo_path(pipe_repo, segment)
return pipe_class, pipe_repo
return pipe_class, pipe_repo
def _resolve_variable(self, reference: str, prior_text: str) -> Optional[str]:
- """Try to find the variable from other lines / 嘗試從其他行中查找(例如多行定義)"""
+ """Try to find the variable from other lines / 尝试从其他行中找到它(例如,多行定义)"""
var_name = reference
search = f"{var_name} ="
@@ -152,10 +153,10 @@ def _resolve_variable(self, reference: str, prior_text: str) -> Optional[str]:
if repo_id:
return repo_id
- nfo(f"Warning: {search} not found in docstring.")
+ NFO(f"Warning: {search} not found in docstring.")
return None
-def parse_docs(doc_string: str) -> DocParseData:
+def parse_docs(doc_string: str) -> DocParseData | None:
parser = DocStringParser(doc_string=doc_string)
return parser.parse()
diff --git a/mir/generate/diffusers/guiders.py b/mir/generate/diffusers/guiders.py
new file mode 100644
index 0000000..39789af
--- /dev/null
+++ b/mir/generate/diffusers/guiders.py
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+# def gen_guiders(mir_db: MIRDatabase): # upstream not quite ready for this yet
+# from nnll.metadata.helpers import snake_caseify
+# from diffusers.guider import GuiderType
+
+# guider_type = GuiderType
+# for comp_name in guider_type.items():
+# class_obj = comp_name.__name__
+# mir_data = {"pkg": {0: {"diffusers": class_obj}}}
+# try:
+# mir_db.add(
+# mir_entry(
+# domain="ops",
+# arch="noise_prediction",
+# series="guider",
+# comp=snake_caseify(class_obj),
+# **mir_data,
+# ),
+# )
+# except IndexError as error_log:
+# nfo(f"Failed to create compatibility: {class_obj}")
+# dbug(error_log)
+
+
+# (
+# "info.unet",
+# "stable-cascade",
+# {
+# "combined": {
+# "pkg": {
+# 0: { # decoder=decoder_unet
+# "precision": "ops.precision.bfloat.B16",
+# "generation": {
+# "negative_prompt": "",
+# "num_inference_steps": 20,
+# "guidance_scale": 4.0,
+# "num_images_per_prompt": 1,
+# "width": 1024,
+# "height": 1024,
+# },
+# },
+# "pkg_alt": {
+# 0: {
+# "diffusers": {
+# "StableCascadeCombinedPipeline": {
+# "negative_prompt": "",
+# "num_inference_steps": 10,
+# "prior_num_inference_steps": 20,
+# "prior_guidance_scale": 3.0,
+# }
+# },
+# }
+# },
+# }
+# }
+# },
+# ),
+
diff --git a/mir/generate/diffusers/index.py b/mir/generate/diffusers/index.py
new file mode 100644
index 0000000..06628e8
--- /dev/null
+++ b/mir/generate/diffusers/index.py
@@ -0,0 +1,233 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import os
+from importlib import import_module
+from typing import Any, Generator
+
+from mir import DBUQ, NFO
+from mir.data import EXCLUSIONS
+from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS, DocParseData, DocStringEntry
+from mir.generate.diffusers.doc_parse import parse_docs
+from mir.generate.from_module import import_object_named, show_init_fields_for, to_domain_tag
+from mir.generate.indexers import migrations
+from mir.tag import tag_model_from_repo
+
+
def retrieve_diffusers_docstrings(
    package_name: str,
    file_names: list[str],
) -> Generator[DocStringEntry]:
    """Yield a ``DocStringEntry`` for each pipeline module that defines ``EXAMPLE_DOC_STRING``.\n
    :param package_name: Package under ``diffusers.pipelines``.\n
    :param file_names: A list of related module file names (without ``.py`` suffix).\n
    :return: Generator of ``DocStringEntry`` records.\n
    """

    # Locate the installed diffusers.pipelines package on disk so module files can be existence-checked.
    module_location: str | None = import_module("diffusers.pipelines").__file__
    module_path = os.path.dirname(module_location)

    for file_name in file_names:
        assert isinstance(file_name, str), f"Expected path to be string, got {file_name} type {type(file_name)}"
        if file_name == "pipeline_stable_diffusion_xl_inpaint":
            # skipped deliberately — NOTE(review): presumably duplicates the non-inpaint SDXL docstring; confirm
            continue

        pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
        DBUQ(pkg_path)

        if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
            # Try the project importer first, then a plain import_module; the final NFO
            # arm logs the failure (getattr below tolerates a None pipe_file).
            pipe_file = import_object_named(file_name, pkg_path) or import_module(pkg_path) or NFO(f"Failed to import {pkg_path}")
            if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
                yield DocStringEntry(package_name=package_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
            else:
                NFO(f"Doc string attribute missing for {package_name}/{file_name}")
        else:
            NFO(f"Path not found for {package_name}/{file_name}")

    return
+
+
def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: "Callable | None" = None) -> tuple[str, dict[str, dict[Any, Any]]]:
    """Create a pipeline entry and generate corresponding information according to the provided repo path and pipeline category\n
    :param repo_path: Repository path.
    :param class_name: Diffusers pipeline class name.
    :param model_class_obj: Optional pre-resolved class object; overridden when ``diffusers`` exposes ``class_name``.
    :raises TypeError: If 'repo_path' or 'class_name' are not set.
    :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
    """
    # fix: annotation is quoted because ``Callable`` is not imported at module scope
    # (only ``Any``/``Generator`` are) — an unquoted annotation raises NameError at import time.
    import diffusers  # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name

    control_net = ["Control", "Controlnet"]
    mir_prefix = "info"
    if hasattr(diffusers, class_name):
        model_class_obj = getattr(diffusers, class_name)
    sub_segments = show_init_fields_for(model_class_obj, "diffusers")
    decoder = "decoder" in sub_segments
    if repo_path in ["kandinsky-community/kandinsky-3"]:
        # NOTE(review): this prefix can be overwritten by the to_domain_tag else-branch below — confirm intended
        mir_prefix = "info.unet"
    if repo_path in ["openai/shap-e"]:
        mir_prefix = "info.unet"
        class_name = "ShapEPipeline"
    elif class_name == "MotionAdapter":
        mir_prefix = "info.lora"
    elif class_name == "WanPipeline":
        mir_prefix = "info.dit"
    elif class_name == "CogVideoXVideoToVideoPipeline":
        class_name = "CogVideoXPipeline"
    elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
        mir_prefix = "info.controlnet"
    else:
        # No special-case matched: derive the domain tag from the pipeline's init fields.
        mir_prefix = to_domain_tag(**sub_segments)
        if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
            NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
        else:
            mir_prefix = "info." + mir_prefix
    if class_name == "StableDiffusion3InpaintPipeline" or repo_path in ["stabilityai/stable-diffusion-3-medium-diffusers"]:
        class_name = "StableDiffusion3Pipeline"
        repo_path = "stabilityai/stable-diffusion-3.5-medium"
    if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
        class_name = "HunyuanVideoPipeline"
    mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
    mir_series = mir_prefix + "." + mir_series
    repo_path = migrations(repo_path)
    # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
    prefixed_data = {
        "repo": repo_path,
        "pkg": {0: {"diffusers": class_name}},
        # "mode": modalities.get("mode"),
    }
    return mir_series, {mir_comp: prefixed_data}
+
+
def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
    """Convert model repo pipes to MIR tags, classifying by feature\n
    :param repo_path: Repo path
    :param class_name: The HF Diffusers class for the model
    :param addendum: Data stored under the compatibility key in place of the generated entry
    :return: A segmented MIR tag useful for appending index entries"""
    mir_series, mir_data = create_pipe_entry(repo_path=repo_path, class_name=class_name)
    # Split "domain.arch.series" into the prefix ("domain.arch") and the trailing series segment.
    mir_prefix, mir_series = mir_series.rsplit(".", 1)
    mir_comp = list(mir_data)[0]  # create_pipe_entry returns a single-key mapping
    return mir_prefix, mir_series, {mir_comp: addendum}
+
+
def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
    """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
    :return: Lists of docstring entries, one list per pipeline package"""
    import diffusers.pipelines as diffusers_pipelines

    docstring_patterns = EXCLUSIONS
    exclusion_list = docstring_patterns["exclusion_list"]
    uncommon_naming = docstring_patterns["uncommon_naming"]
    for pipe_name in IMPORT_STRUCTURE:
        if pipe_name in exclusion_list:
            continue
        file_specific = uncommon_naming.get(pipe_name, pipe_name)
        # fix: supply a None default — without it a name present in IMPORT_STRUCTURE but
        # absent from the namespace raised AttributeError instead of reaching the falsy branch.
        if import_name := getattr(diffusers_pipelines, str(pipe_name), None):
            # Prefer the package's own _import_structure listing; fall back to the conventional file name.
            file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
            yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
+
+
def show_diffusers_tasks(code_name: str, class_name: str | None = None) -> list[str]:
    """Return Diffusers task pipes based on package-specific query\n
    :param code_name: Model code fragment matched as a substring against each task map's keys
    :param class_name: Diffusers class pipe whose task variants should be resolved, defaults to None
    :return: A list of alternate class pipelines derived from the specified class"""

    alt_tasks = set()
    for task_map in SUPPORTED_TASKS_MAPPINGS:
        # third arg presumably suppresses raising when the class has no entry in this map — TODO confirm
        task_class = GET_TASK_CLASS(task_map, class_name, False)
        if task_class:
            alt_tasks.add(task_class.__name__)
            DBUQ(task_class)
        for model_code, pipe_class_obj in task_map.items():
            if code_name in model_code:
                alt_tasks.add(pipe_class_obj.__name__)

    # NOTE(review): built from a set, so result order is not deterministic
    return list(alt_tasks)
+
+
def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
    """Generate diffusion model data for MIR index\n
    :return: Dictionary ready to be applied to MIR data fields
    """
    # Docstring examples for these repos/classes point at a stand-in repo; remap before tagging.
    special_repos = {
        "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-dev",
        # "stabilityai/stable-diffusion-3-medium-diffusers": "stabilityai/stable-diffusion-3.5-medium",
    }
    special_classes = {
        # "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium", # NOT sd3
        "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",  # NOT hyd .ckpt
        "ChromaPipeline": "lodestones/Chroma",
    }

    extracted_docstrings = find_diffusers_docstrings()
    model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
    pipe_data: dict[str, dict[str, dict[str, Any]]] = {}  # pipeline_stable_diffusion_xl_inpaint

    for extracted in model_info:
        parsed_data: DocParseData | None = parse_docs(extracted.doc_string)
        if parsed_data is None:
            print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
            continue
        for class_name, swap_repo in special_classes.items():
            if parsed_data.pipe_class == class_name:
                parsed_data.pipe_repo = swap_repo
                break
        model_class_obj = import_object_named(parsed_data.pipe_class, extracted.pipe_module.__name__)
        if not model_class_obj:
            continue
        # fix: initialize so a TypeError on Attempt 1 cannot leave these unbound
        # (previously the first failing iteration raised UnboundLocalError below).
        series: str | None = None
        comp_data: dict[str, Any] = {}
        try:
            series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
        except TypeError:
            pass  # Attempt 1 failed; the staged/special retry below may still succeed
        if series is not None:
            if pipe_data.get(series) and "img2img" in parsed_data.pipe_class.lower():
                continue  # keep the primary entry; img2img variants do not overwrite it
            pipe_data.setdefault(series, {}).update(comp_data)
        special_conditions = special_repos | special_classes
        if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
            # fix: seed from the parsed staged repo so it is always bound; the special-case
            # mapping below still takes precedence when it applies.
            staged_repo = parsed_data.staged_repo
            test = special_conditions.get(parsed_data.pipe_repo)
            if test:
                staged_repo = test
                parsed_data.staged_class = parsed_data.pipe_class
            try:
                series, comp_data = create_pipe_entry(
                    staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
                    parsed_data.staged_class  #
                    if parsed_data.staged_class
                    else parsed_data.pipe_class,
                )
            except TypeError as error_log:
                NFO(series, comp_data)
                NFO(error_log)
                continue  # Attempt 2 failed
            pipe_data.setdefault(series, {}).update(comp_data)
    return dict(pipe_data)
+
+
+# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
+# from nnll.download.hub_cache import download_hub_file
+
+# model_file = download_hub_file(
+# repo_id=f"{repo_id}/tree/main/{arch}",
+# source="huggingface",
+# file_name="diffusion_pytorch_model.safetensors.index.json",
+# local_dir=".tmp",
+# )
+
+
+# @MODE_DATA.decorator
+# def add_mode_types(mir_tag: list[str], data: dict | None = None) -> dict[str, list[str] | str]:
+# """_summary_\n
+# :param mir_tag: _description_
+# :param data: _description_, defaults to None
+# :return: _description_"""
+# fused_tag = ".".join(mir_tag)
+
+# mir_details = {
+# "mode": data.get(fused_tag, {}).get("pipeline_tag"),
+# "pkg_type": data.get(fused_tag, {}).get("library_type"),
+# "tags": data.get(fused_tag, {}).get("tags"),
+# }
+# return mir_details
diff --git a/mir/generate/diffusers/schedulers.py b/mir/generate/diffusers/schedulers.py
new file mode 100644
index 0000000..e415427
--- /dev/null
+++ b/mir/generate/diffusers/schedulers.py
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import re
+from importlib import import_module
+
+from mir.generate.diffusers import IMPORT_STRUCTURE
+from mir.maid import MIRDatabase
+from mir.spec import mir_entry
+
+
def tag_scheduler(series_name: str) -> tuple[str, str]:
    """Create a mir label from a scheduler class name\n
    :param series_name: Scheduler class name, e.g. ``"DPMSolverMultistepScheduler"``
    :return: The series tag and compatibility tag, both lowercase
    :raises AssertionError: If no known scheduler suffix appears in the name"""

    comp_name = None
    # First matching suffix becomes the compatibility tag; order matters.
    patterns = [r"Schedulers", r"Multistep", r"Solver", r"Discrete", r"Scheduler"]
    for scheduler in patterns:
        match = re.search(scheduler, series_name)
        if match:
            comp_name = match.group().lower()
            break
    # Strip every suffix pattern so only the scheduler family name remains.
    for pattern in patterns:
        series_name = re.sub(pattern, "", series_name)
    # Fix: the original called series_name.lower() as a bare statement and
    # discarded the result, returning a mixed-case series tag.
    series_name = series_name.lower()
    assert comp_name is not None, "Expected compatibility tag but got None"
    return series_name, comp_name
+
+
def add_schedulers(mir_db: MIRDatabase):
    """Create mir info database"""

    def _register(module_path: str, entry_class: str) -> None:
        # One database record per scheduler class.
        series_tag, comp_tag = tag_scheduler(entry_class)
        module_ref = import_module(module_path)
        origin = getattr(module_ref, entry_class).__module__
        mir_db.add(
            mir_entry(
                domain="ops",
                arch="scheduler",
                series=series_tag,
                comp=comp_tag.lower(),
                pkg={
                    0: {
                        "diffusers": entry_class,
                        "module_path": origin,
                    },
                },
            )
        )

    # Every exported scheduler except the shared mixin base.
    for entry_class in IMPORT_STRUCTURE["schedulers"]:
        if entry_class != "SchedulerMixin":
            _register("diffusers.schedulers", entry_class)

    # The Karras enumeration lives in a utility module, not the package root.
    _register("diffusers.schedulers.scheduling_utils", "KarrasDiffusionSchedulers")
diff --git a/mir/generate/from_module.py b/mir/generate/from_module.py
new file mode 100644
index 0000000..c85ec70
--- /dev/null
+++ b/mir/generate/from_module.py
@@ -0,0 +1,125 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+# Module discovery and deconstruction
+
+import inspect
+import os
+from importlib import import_module
+from typing import Callable, Type
+
+from mir import NFO
+from mir.generate import REGEX
+from mir.generate.diffusers import IMPORT_STRUCTURE
+from mir.generate.transformers import MODEL_MAPPING_NAMES
+
+
+def import_object_named(module: str, pkg_name_or_abs_path: str) -> Callable | None:
+ """Convert two strings into a callable function or property\n
+ :param module: The name of the module to import
+ :param library_path: Base package for the module
+ :return: The callable attribute or property
+ """
+
+ module_normalized: str = module.strip()
+ library = pkg_name_or_abs_path.strip()
+ try:
+ base_library = import_module(library, module_normalized)
+ except SyntaxError:
+ base_library = None
+ NFO(f"Syntax error attempting to import {module_normalized}")
+ else:
+ module_obj = getattr(base_library, module_normalized)
+ return module_obj
+ return None
+
+
+def show_init_fields_for(module: Callable | str, package_name: str | None = None, erase: bool = False) -> dict[str, list[str]]:
+ """Pick apart a Diffusers or Transformers pipeline class and find its constituent parts\n
+ :param module: Origin pipeline as a class or as a string
+ :param library: name of a library to import the class from, only if a string is provided
+ :return: Dictionary of sub-classes from the `module`"""
+
+ if package_name and isinstance(module, str):
+ module_obj: Callable | None = import_object_named(module, package_name)
+ else:
+ assert isinstance(module, Callable), f"Expected Callable module object, got {module} type {type(module)}"
+ module_obj = module
+ assert isinstance(module_obj, Callable), f"Expected Callable module object, got {module} type {type(module)}"
+ signature = inspect.signature(module_obj.__init__)
+ editable_signature = signature.parameters.copy()
+ editable_signature.pop("self", None)
+ editable_signature.pop("kwargs", None)
+ editable_signature.pop("use_cache", None)
+ class_names = {}
+ if erase:
+ for folder, param in editable_signature.items():
+ class_names.setdefault(folder, True)
+ else:
+ for folder, param in editable_signature.items():
+ class_names.setdefault(folder, str(param))
+ class_names = dict(class_names)
+
+ return class_names
+
+
+def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
+ """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
+ ### NOTE: in most cases `__module__` makes this redundant
+ :param code_name: The internal name for the model in the third-party API.
+ :param pkg_name: The API Package
+ :return: A list corresponding to the path of the model, or None if not found
+ :raises KeyError: for invalid pkg_name
+ """
+
+ pkg_paths = {
+ "diffusers": "pipelines",
+ "transformers": "models",
+ }
+ folder_name = code_name.replace("-", "_")
+ pkg_name = pkg_name.lower()
+ folder_path = pkg_paths[pkg_name]
+ package_obj = import_module(pkg_name)
+ folder_path_named = [folder_path, folder_name]
+ pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
+ # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
+ if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
+ import_path = [pkg_name]
+ import_path.extend(folder_path_named)
+ return import_path
+
+
def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
    """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
    :param module_name: To return only one class, defaults to None
    :param pkg_name: optional field for library, defaults to "transformers"
    :param path_format: Retrieve just the code name, or the full module path and code name within the package
    :return: A list of all code names, or the one corresponding to the provided class"""

    # Normalize BEFORE selecting the mapping source; the original lowered the
    # name after the comparison, so "Diffusers" would select the wrong mapping.
    pkg_name = pkg_name.lower()
    package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
    MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
    if module_name:
        if isinstance(module_name, type):
            module_name = module_name.__name__
        code_name = next((key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
        return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
    return list(MAPPING_NAMES)
+
+
def to_domain_tag(transformers: bool = False, **kwargs):
    """Set type of MIR prefix depending on model type\n
    :param transformers: Use transformers data instead of diffusers data, defaults to False
    :return: MIR prefix based on model configuration, or None when nothing matches"""

    arch_kind = "transformer" if transformers else "diffuser"
    flags = REGEX["arch"][arch_kind]  # pylint:disable=unsubscriptable-object
    for mir_prefix, key_match in flags.items():
        # A prefix wins as soon as any of its marker parameters is present.
        if any(kwargs.get(param) for param in key_match):
            return mir_prefix
    return None
diff --git a/mir/generate/indexers.py b/mir/generate/indexers.py
new file mode 100644
index 0000000..8ef00f3
--- /dev/null
+++ b/mir/generate/indexers.py
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+"""類發現和拆卸"""
+# pylint:disable=no-name-in-module
+
+from mir.generate import MIGRATIONS
+
+
def migrations(repo_path: str):
    """Replaces old organization names in repository paths with new ones.\n
    :param repo_path: Original repository path containing old organization names
    :return: Updated repository path with new organization names"""

    updated_path = repo_path
    for outdated_name, replacement_name in MIGRATIONS.items():
        if outdated_name in updated_path:
            updated_path = updated_path.replace(outdated_name, replacement_name)
    return updated_path
diff --git a/mir/inspect/__init__.py b/mir/generate/mlx/__init__.py
similarity index 100%
rename from mir/inspect/__init__.py
rename to mir/generate/mlx/__init__.py
diff --git a/mir/generate/mlx/index.py b/mir/generate/mlx/index.py
new file mode 100644
index 0000000..31f735e
--- /dev/null
+++ b/mir/generate/mlx/index.py
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+import os
+import re
+
+# def tag_mlx_model(repo_path: str, class_name: str, addendum: dict) -> tuple[str]:
+# dev_series, dev_comp = make_mir_tag("black-forest-labs/FLUX.1-dev")
+# schnell_series, schnell_comp = make_mir_tag("black-forest-labs/FLUX.1-schnell")
+# series, comp = make_mir_tag(repo_path)
+# if class_name == "Flux1":
+# mir_prefix = "info.dit"
+# base_series = dev_series
+# mir_comp = series
+# return mir_prefix, base_series, {base_comp: addendum}
+
+
def mlx_repo_capture(base_repo: str = "mlx-community"):
    """Scan installed mlx_audio sources for hard-coded hub repositories\n
    :param base_repo: Hub organization prefix to search for, defaults to "mlx-community"
    :return: Mapping of repo short name -> {full repo path: [loader class names]};
        empty dict when mlx_audio is not installed"""

    try:
        import mlx_audio  # type: ignore
    except ImportError:
        return {}
    captured = {}
    package_root: str = os.path.dirname(mlx_audio.__file__)
    # `_dirs` unused (original shadowed the builtin `dir` here).
    for root, _dirs, file_names in os.walk(package_root):
        for file_name in file_names:
            if not file_name.endswith((".py", ".html", ".md", ".ts")):
                continue
            with open(os.path.join(root, file_name), "r") as open_file:
                content = open_file.read()
            if "mlx-community/" not in content:
                continue
            for match in re.findall(base_repo + r'/(.*?)"', content):
                # Look just before the repo reference for a `<Class>.from_pretrained` call.
                previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
                class_match = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
                if class_match:
                    captured[match] = {f"{base_repo}/{match}": [*class_match]}
                # NOTE(review): the original also probed models/<name>/<name>.py under
                # tts/sts folders but re-searched `previous_data` (not the file it read)
                # and discarded the result -- removed here as dead code; it likely
                # intended to search `read_data`. Confirm before restoring.
    return captured
+
+
+# def mlx_repo_capture(base_repo: str = "mlx-community"):
+# import os
+# import re
+# import mlx_audio
+
+# result = {}
+# result_2 = {}
+# folder_path_named: str = os.path.dirname(mlx_audio.__file__)
+# for root, _, file_names in os.walk(folder_path_named):
+# for file in file_names:
+# if file.endswith((".py", ".html", ".md", ".ts")):
+# with open(os.path.join(root, file), "r") as open_file:
+# content = open_file.read()
+# if "mlx-community/" in content:
+# matches = re.findall(base_repo + r'/(.*?)"', content)
+# for match in matches:
+# print(file)
+# result[match] = f"{base_repo}/{match}"
+# previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
+# matches = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
+# if matches:
+# result_2[match] = {f"{base_repo}/{match}": [*matches]}
+# else:
+# result_2[match] = {f"{base_repo}/{match}": None}
+# return result_2
+
+
+# def mlx_audio_scrape(base_repo: str = "mlx-community"):
+# import os
+# import re
+# import mlx_audio
+
+# result = {}
+# result_2 = {}
+# folder_path_named: str = os.path.dirname(mlx_audio.__file__)
+# for root, _, file_names in os.walk(folder_path_named):
+# for file in file_names:
+# if file.endswith((".py",)):
+# with open(os.path.join(root, file), "r") as open_file:
+# content = open_file.read()
+# if "mlx-community/" in content:
+# matches = re.findall(base_repo + r'/(.*?)"', content)
+# for match in matches:
+# result[match] = f"{base_repo}/{match}"
+# previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
+# matches = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
+# if len(matches) > 1:
+# result_2[match] = {f"{base_repo}/{match}": [*matches]}
+# else:
+# if "nn.Module" in content:
+# previous_data = content[content.rindex("nn.Module") - 50 : content.rindex("nn.Module")]
+# matches = re.search(r"(\w+)\.", previous_data, re.MULTILINE)
+# result_2[match] = {f"{base_repo}/{match}": [*matches]}
+# return result_2
diff --git a/mir/inspect/tasks.py b/mir/generate/tasks.py
similarity index 55%
rename from mir/inspect/tasks.py
rename to mir/generate/tasks.py
index 3356ef5..1e28e2e 100644
--- a/mir/inspect/tasks.py
+++ b/mir/generate/tasks.py
@@ -1,10 +1,14 @@
-# # #
-# # #
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
-from typing import Any, Callable, Dict, List, get_type_hints
-from mir.maid import MIRDatabase
-from mir.config.console import dbuq
+from typing import Any, Callable, List, get_type_hints
+from mir.generate.from_module import get_internal_name_for, import_object_named
+from mir.generate.transformers.index import show_transformers_tasks
+from mir.maid import MIRDatabase
+from mir.generate.diffusers.index import show_diffusers_tasks
+from mir.generate.diffusers.schedulers import tag_scheduler
+from mir import DBUQ
flatten_map: List[Any] = lambda nested, unpack: [element for iterative in getattr(nested, unpack)() for element in iterative]
flatten_map.__annotations__ = {"nested": List[str], "unpack": str}
@@ -25,64 +29,6 @@ def __init__(self) -> None:
self.skip_types = ["int", "bool", "float", "Optional", "NoneType", "List", "UNet2DConditionModel"]
self.mflux_tasks = ["Image", "Redux", "Kontext", "Depth", "Fill", "ConceptAttention", "ControlNet", "CavTon", "IC-Edit"]
- @staticmethod
- def show_diffusers_tasks(code_name: str, class_name: str | None = None) -> list[str]:
- """Return Diffusers task pipes based on package-specific query\n
- :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
- :param code_name: To find task pipes from a Transformers class pipe, defaults to None
- :return: A list of alternate class pipelines derived from the specified class"""
-
- if class_name:
- from diffusers.pipelines.auto_pipeline import SUPPORTED_TASKS_MAPPINGS, _get_task_class
-
- alt_tasks = set()
- for task_map in SUPPORTED_TASKS_MAPPINGS:
- task_class = _get_task_class(task_map, class_name, False)
- if task_class:
- alt_tasks.add(task_class.__name__)
- dbuq(task_class)
- for model_code, pipe_class_obj in task_map.items():
- if code_name in model_code:
- alt_tasks.add(pipe_class_obj.__name__)
-
- return list(alt_tasks)
-
- @staticmethod
- def show_transformers_tasks(class_name: str | None = None, code_name: str | None = None) -> list[str]:
- """Retrieves a list of task classes associated with a specified transformer class.\n
- :param class_name: The name of the transformer class to inspect.
- :param pkg_type: The dependency for the module
- :param alt_method: Use an alternate method to return the classes
- :return: A list of task classes associated with the specified transformer."""
-
- task_classes = None
-
- if not code_name:
- from mir.config.conversion import import_submodules
-
- class_obj: Callable = import_submodules(class_name, "transformers")
- class_module: Callable = import_submodules(*class_obj.__module__.split(".", 1)[-1:], class_obj.__module__.split(".", 1)[0])
- if class_module and class_module.__name__ != "DummyPipe":
- task_classes = getattr(class_module, "__all__")
- else:
- return None
- elif code_name:
- from mir.config.constants import mapped_cls
- from httpx import HTTPStatusError
-
- try:
- model_class = mapped_cls(code_name)
- if model_class is not None:
- # Convert class type to list containing the class name string
- task_classes = [model_class.__name__]
- else:
- return None
- except (OSError, HTTPStatusError) as e:
- dbuq(f"Error mapping class {code_name}: {e}")
- return None
-
- return task_classes
-
async def detect_tasks(self, mir_db: MIRDatabase, field_name: str = "pkg") -> dict:
"""Detects and traces tasks MIR data\n
:param mir_db:: An instance of MIRDatabase containing the database of information.
@@ -120,7 +66,6 @@ async def detect_pipes(self, mir_db: MIRDatabase, field_name: str = "pkg") -> di
:type field_name: str, optional
:return:A dictionary mapping series names to their respective compatibility and traced tasks.
:rtype: dict"""
- from mir.config.conversion import import_submodules
data_tuple = []
for series, compatibility_data in mir_db.database.items():
@@ -134,8 +79,8 @@ async def detect_pipes(self, mir_db: MIRDatabase, field_name: str = "pkg") -> di
for _, pkg_tree in field_data[field_name].items():
if pkg_tree and next(iter(pkg_tree)) == "diffusers":
module_name = pkg_tree[next(iter(pkg_tree))]
- dbuq(f"{module_name} pipe originator")
- class_obj = import_submodules(module_name, "diffusers")
+ DBUQ(f"{module_name} pipe originator")
+ class_obj = import_object_named(module_name, "diffusers")
pipe_args = get_type_hints(class_obj.__init__)
detected_pipe = await self.hyperlink_to_mir(pipe_args, series, mir_db)
data_tuple.append((*series.rsplit(".", 1), {compatibility: detected_pipe}))
@@ -157,7 +102,7 @@ async def hyperlink_to_mir(self, pipe_args: dict, series: str, mir_db: MIRDataba
if not any(segment for segment in self.skip_types if pipe_class.__name__ == segment):
mir_tag = None
detected_links["pipe_names"][pipe_role] = []
- dbuq(f"pipe_class.__name__ {pipe_class.__name__} {pipe_class}")
+ DBUQ(f"pipe_class.__name__ {pipe_class.__name__} {pipe_class}")
if pipe_class.__name__ in ["Union"]:
for union_class in pipe_class.__args__:
mir_tag = None
@@ -182,8 +127,6 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir
:param mir_db: MIRDatabase instance for querying tags/IDs
:return: Tuple containing MIR tag and class name"""
- from mir.tag import tag_scheduler
-
mir_tag = None
class_name = pipe_class.__name__
if pipe_role in ["scheduler", "image_noising_scheduler", "prior_scheduler"]:
@@ -192,18 +135,18 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir
mir_tag = [f"ops.scheduler.{scheduler_series}", scheduler_comp]
if not mir_db.database.get(mir_tag[0], {}).get(mir_tag[1]):
mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="ops.scheduler")
- dbuq(f"scheduler {mir_tag} {class_name} {sub_field} ")
+ DBUQ(f"scheduler {mir_tag} {class_name} {sub_field} ")
elif pipe_role == "vae":
sub_field = pipe_class.__module__.split(".")[0]
mir_comp = series.rsplit(".", 1)[-1]
- dbuq(mir_comp)
+ DBUQ(mir_comp)
mir_tag = [mir_id for mir_id, comp_data in mir_db.database.items() if "info.vae" in mir_id and next(iter(comp_data)) == mir_comp]
if mir_tag:
mir_tag.append(mir_comp) # keep mir tag as single list
elif class_name != "AutoencoderKL":
- dbuq(pipe_class)
+ DBUQ(pipe_class)
mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="info.vae")
- dbuq(f"vae {mir_tag} {class_name} {sub_field} ")
+ DBUQ(f"vae {mir_tag} {class_name} {sub_field} ")
else:
mir_tag = mir_db.find_tag(field="tasks", target=class_name)
return mir_tag, class_name
@@ -213,119 +156,24 @@ async def trace_tasks(self, pkg_tree: dict[str, str | int | list[str | int]]) ->
:param entry: The object containing the model information.
:return: A sorted list of tasks applicable to the model."""
- from mir.inspect.classes import resolve_code_names
-
preformatted_task_data = None
filtered_tasks = None
snip_words: set[str] = {"load_tf_weights_in"}
package_name = next(iter(pkg_tree))
- dbuq(pkg_tree)
+ DBUQ(pkg_tree)
class_name = pkg_tree[package_name]
- dbuq(f"{package_name}, {class_name}")
+ DBUQ(f"{package_name}, {class_name}")
if class_name not in self.skip_auto:
if isinstance(class_name, dict):
class_name = next(iter(list(class_name)))
if package_name == "transformers":
- preformatted_task_data = self.show_transformers_tasks(class_name=class_name)
+ preformatted_task_data = show_transformers_tasks(class_name=class_name)
elif package_name == "diffusers":
- code_name = resolve_code_names(class_name, package_name)
- preformatted_task_data = self.show_diffusers_tasks(code_name=code_name, class_name=class_name)
+ code_name = get_internal_name_for(class_name, package_name)
+ preformatted_task_data = show_diffusers_tasks(code_name=code_name, class_name=class_name)
preformatted_task_data.sort()
elif package_name == "mflux":
preformatted_task_data = self.mflux_tasks
if preformatted_task_data:
filtered_tasks = [task for task in preformatted_task_data for snip in snip_words if snip not in task]
return filtered_tasks # package_name, class_name
-
-
-def trace_classes(pipe_class: str, pkg_name: str) -> Dict[str, List[str]]:
- """Retrieve all compatible pipe forms\n
- NOTE: Mainly for Diffusers
- :param pipe_class: Origin pipe
- :param pkg_name: Dependency package
- :return: A dictionary of pipelines"""
- from mir.inspect.classes import resolve_class_name, extract_inherited
- from mir.config.conversion import import_submodules
- from mir.inspect.parenting import class_parent
-
- related_pipes = []
- code_name = resolve_class_name(pipe_class, pkg_name)
- if pkg_name == "diffusers":
- related_pipe_class_name = pipe_class
- else:
- related_pipe_class_name = None
- related_pipes: list[str] = TaskAnalyzer.show_diffusers_tasks(code_name=code_name, class_name=related_pipe_class_name)
- # for i in range(len(auto_tasks)):
- # auto_tasks.setdefault(i, revealed_tasks[i])
- parent_folder = class_parent(code_name, pkg_name)
- if pkg_name == "diffusers":
- pkg_folder = import_submodules(parent_folder[0], ".".join(parent_folder))
- else:
- pkg_folder = import_submodules("__init__", ".".join(parent_folder[:-1]))
- if hasattr(pkg_folder, "_import_structure"):
- related_pipes.extend(next(iter(x)) for x in pkg_folder._import_structure.values())
- related_pipes = set(related_pipes)
- related_pipes.update(tuple(x) for x in extract_inherited(model_class=pipe_class, pkg_name=pkg_name))
- return related_pipes
-
-
-def main(mir_db: MIRDatabase = None):
- """Parse arguments to feed to dict header reader"""
- import argparse
- import asyncio
- from mir.automata import assimilate
- from sys import modules as sys_modules
-
- if "pytest" not in sys_modules:
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Scrape the task classes from currently installed libraries and attach them to an existing MIR database.\nOffline function.",
- usage="mir-tasks",
- epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
- )
- parser.parse_args()
-
- if not mir_db:
- mir_db = MIRDatabase()
-
- auto_pkg = TaskAnalyzer()
- task_tuple = asyncio.run(auto_pkg.detect_tasks(mir_db))
-
- assimilate(mir_db, [task for task in task_tuple])
-
- mir_db.write_to_disk()
- return mir_db
-
-
-def run_task():
- main()
-
-
-def pipe(mir_db: MIRDatabase = None):
- import argparse
- import asyncio
- from sys import modules as sys_modules
-
- if "pytest" not in sys_modules:
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Infer pipe components from Diffusers library and attach them to an existing MIR database.\nOffline function.",
- usage="mir-pipe",
- epilog="Can be run automatically with `python -m nnll.mir.maid` Should only be used after `mir-maid`.\n\nOutput:\n INFO ('Wrote #### lines to MIR database file.',)",
- )
- parser.parse_args()
-
- from mir.automata import assimilate
-
- if not mir_db:
- mir_db = MIRDatabase()
-
- auto_pkg = TaskAnalyzer()
- pipe_tuple = asyncio.run(auto_pkg.detect_pipes(mir_db))
- assimilate(mir_db, [pipe for pipe in pipe_tuple])
- mir_db.write_to_disk()
- return mir_db
-
-
-if __name__ == "__main__":
- pipe()
diff --git a/mir/generate/torch/__init__.py b/mir/generate/torch/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/mir/generate/torch/dtypes.py b/mir/generate/torch/dtypes.py
new file mode 100644
index 0000000..08a2484
--- /dev/null
+++ b/mir/generate/torch/dtypes.py
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import re
+
+import torch
+
+from mir import DBUQ
+from mir.maid import MIRDatabase
+from mir.spec import mir_entry
+
+
+def slice_number(text: str) -> int | float | str:
+ """Separate a numeral value appended to a string\n
+ :return: Converted value as int or float, or unmodified string
+ """
+ for index, char in enumerate(text): # Traverse forwards
+ if char.isdigit():
+ numbers = text[index:]
+ if "." in numbers:
+ return float(numbers)
+ try:
+ return int(numbers)
+ except ValueError:
+ return numbers
+ return text
+
+
def add_mir_dtype(mir_db: MIRDatabase):
    """Create mir info database\n
    :param mir_db: Database instance that receives one entry per torch dtype"""

    # Every torch.dtype singleton exposed on the torch module (float32, int8, ...).
    available_dtypes: list[torch.dtype] = [dtype for dtype in torch.__dict__.values() if isinstance(dtype, torch.dtype)]
    series_name = "_"  # fallback series when no pattern below matches
    for precision in available_dtypes:
        # str(dtype) renders as "torch.<name>", e.g. "torch.float16".
        dep_name, class_name = str(precision).split(".")
        if "_" in class_name:
            # Sub-variant dtypes, e.g. "float8_e4m3fn" -> "F8_E4M3FN".
            # NOTE(review): the "8" is hard-coded -- assumes all underscored
            # dtypes are 8-bit variants; confirm for future torch releases.
            comp_name = class_name[0].upper() + "8_" + class_name.split("_")[1].upper()
            if comp_name.endswith("FN"):
                # Drop the trailing "FN" marker from the comp tag.
                comp_name = comp_name[:-2]
        else:
            # e.g. "float16" -> "F16"; digit-free names keep their text ("bool" -> "Bbool").
            comp_name = class_name[0].upper() + str(slice_number(class_name))
        # Short variant spelling, e.g. "bfloat16" -> "bf16", "float16" -> "fp16".
        variant_name = class_name.replace("bfloat", "bf").replace("float", "fp")
        DBUQ(variant_name)
        # First substring match wins; "bfloat" is checked before "float" on purpose.
        patterns = [r"complex", r"bits", r"quint", r"uint", r"int", r"bfloat", r"float", r"bool"]
        for precision_name in patterns:
            compiled = re.compile(precision_name)
            dtype = re.search(compiled, class_name)
            if dtype:
                series_name = dtype.group()
                break

        mir_db.add(
            mir_entry(
                domain="ops",
                arch="precision",
                series=series_name,
                comp=comp_name,
                pkg={0: {dep_name.lower(): {class_name.lower(): {"variant": variant_name}}}},
            )
        )
diff --git a/mir/generate/transformers/__init__.py b/mir/generate/transformers/__init__.py
new file mode 100644
index 0000000..cbdf6f8
--- /dev/null
+++ b/mir/generate/transformers/__init__.py
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from dataclasses import dataclass, field
+from typing import Callable
+
+from transformers.models.auto.configuration_auto import CONFIG_MAPPING
+from transformers.models.auto.modeling_auto import (
+ MODEL_MAPPING, # config: model map
+ MODEL_MAPPING_NAMES,
+)
+from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
+
+from mir.generate.from_module import show_init_fields_for
+
+
@dataclass
class ClassMapEntry:
    """Represents a structured entry of the name of the class and its associated attributes."""

    # Internal mapping code name, e.g. "bert"
    name: str
    # Concrete model class name (last dotted segment only)
    model_name: str
    # The model class object itself
    model: Callable
    # The matching configuration class object
    config: Callable
    # Constructor fields of the config class; populated by __post_init__, never by the caller
    config_params: dict[str, list[str]] = field(init=False, default_factory=lambda: {})
    # Constructor fields of the model class; overwritten by __post_init__ when model is set
    model_params: dict[str, list[str]] | None = None

    def __post_init__(self):
        # Derive the parameter maps from the __init__ signatures once the objects are bound.
        if self.model:
            self.model_params = show_init_fields_for(self.model)
        if self.config:
            self.config_params = show_init_fields_for(self.config)
diff --git a/mir/generate/transformers/index.py b/mir/generate/transformers/index.py
new file mode 100644
index 0000000..adc8c65
--- /dev/null
+++ b/mir/generate/transformers/index.py
@@ -0,0 +1,216 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from typing import Callable
+
+from mir import NFO, DBUQ
+from mir.data import PARAMETERS
+from mir.generate.from_module import import_object_named, to_domain_tag
+from mir.generate.indexers import migrations
+from mir.tag import tag_model_from_repo
+from mir.generate.transformers import CONFIG_MAPPING, MODEL_MAPPING, TOKENIZER_MAPPING_NAMES, ClassMapEntry
+
+
def mapped_cls(model_identifier: str):
    """Get model class from identifier without calling huggingface_hub.\n
    :param model_identifier: Model identifier like "bert-base-uncased" or "gpt2"
    :return: Model class (e.g., BertModel, GPT2Model), or None when unmapped
    """
    import transformers
    from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
    from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES

    # Heuristic: "org/bert-base-uncased" -> "bert"
    code_name = model_identifier.split("/")[-1].split("-")[0].lower()

    # Primary path: config name -> config class -> model class.
    config_class_name = CONFIG_MAPPING_NAMES.get(code_name)
    if config_class_name:
        config_class = getattr(transformers, config_class_name, None)
        if config_class:
            model_class = MODEL_MAPPING.get(config_class, None)
            if model_class:
                if isinstance(model_class, tuple):
                    model_class = model_class[0]
                return model_class

    # Fallback: some mapping keys are dash-separated (e.g. "xlm-roberta").
    # (The original also looked up MODEL_MAPPING_NAMES before the primary path
    # and discarded the result -- that dead assignment is removed here.)
    normalized = code_name.replace("_", "-")
    if normalized != code_name:
        if model_class_name := MODEL_MAPPING_NAMES.get(normalized, None):
            if isinstance(model_class_name, tuple):
                model_class_name = model_class_name[0]
            return getattr(transformers, model_class_name, None)

    return None
+
+
def get_repo_from_class_map(class_map: ClassMapEntry) -> str | None:
    """The name of the repository that is associated with a transformers configuration class\n
    :param class_map: Transformers class information extracted from dependency
    :returns: A string matching the repo path for the class, or None when no docstring mentions one"""

    import re

    # Prefer the forward() docstring when present, then the config's own docstring.
    doc_attempt = []
    if hasattr(class_map.config, "forward"):
        doc_attempt = [getattr(class_map.config, "forward")]
    doc_attempt.append(class_map.config)
    for pattern in doc_attempt:
        doc_string = pattern.__doc__
        if not doc_string:
            # Undocumented object (or interpreter run with -OO): re.findall(None)
            # would raise TypeError in the original -- skip instead.
            continue
        # Hub repos are conventionally written as markdown links: [org/name]
        matches = re.findall(r"\[([^\]]+)\]", doc_string)
        if matches:
            try:
                repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
            except StopIteration as error_log:
                NFO(f"ERROR >>{matches} : LOG >> {error_log}")
                continue
            return repo_path
    return None
+
+
def find_transformers_classes() -> list[ClassMapEntry]:
    """Eat the 🤗Transformers classes as a treat, leaving any tasty subclass class morsels neatly arranged as a dictionary.\n
    Nom.\n
    :return: Tasty mapping of subclasses to their class references"""

    model_data = []
    for config_name, config_obj in CONFIG_MAPPING.items():
        if model_obj := MODEL_MAPPING.get(config_obj, None):
            # Some mappings hold a single class, others a tuple of candidates.
            if isinstance(model_obj, Callable):
                model_obj = (model_obj,)
            assert isinstance(model_obj, tuple), f"Expected model class object, got {model_obj} type {type(model_obj)}"
            for model_class in model_obj:
                # (The original carried a `model_params` branch here that could
                # never run -- the variable was always None -- removed.)
                model_name = model_class.__name__
                model_data.append(
                    ClassMapEntry(
                        name=config_name,
                        model_name=model_name.split(".")[-1],
                        model=model_class,  # type: ignore
                        config=config_obj,
                    ),
                )
    return model_data
+
+
def mir_tag_from_config(class_map: ClassMapEntry, repo_path: str) -> tuple[str, str, str]:
    """Change a transformers config class into a MIR series and comp\n
    :param class_map: Transformers class information extracted from dependency
    :param repo_path: Hub repository path used to derive the series and comp names
    :return: Tuple of (mir_series, mir_comp, mir_suffix)
    :raises ValueError: When no MIR prefix can be determined from the class parameters
    """

    mir_prefix = to_domain_tag(transformers=True, **class_map.config_params)
    if not mir_prefix:
        if class_map.model_params:
            # Fall back to the model's own parameters when the config gives no signal.
            mir_prefix = to_domain_tag(transformers=True, **class_map.model_params)
            if not mir_prefix:
                raise ValueError(f"Unable to determine MIR prefix from {class_map, repo_path}")
        else:
            raise ValueError(f"Unrecognized model type, no tag matched {class_map.name} with {class_map.config_params} or {class_map.model_params}")
    mir_prefix = "info." + mir_prefix
    if class_map.name != "funnel":
        mir_suffix, mir_comp = tag_model_from_repo(repo_path)
    else:
        # funnel repos do not follow the usual naming scheme; pin a wildcard comp.
        mir_suffix, mir_comp = "funnel", "*"
    mir_series = mir_prefix + "." + mir_suffix
    return mir_series, mir_comp, mir_suffix
+
+
def show_transformers_tasks(class_name: str | None = None, code_name: str | None = None) -> list[str] | None:
    """Retrieves a list of task classes associated with a specified transformer class.\n
    :param class_name: The name of the transformer class to inspect via its defining module
    :param code_name: Internal model code name to resolve through the auto-mappings instead
    :return: A list of task classes associated with the specified transformer, or None when unresolvable."""

    task_classes = None

    if not code_name:
        class_obj: Callable = import_object_named(class_name, "transformers")
        # Re-import the defining module so its __all__ export list is available.
        class_module: Callable = import_object_named(*class_obj.__module__.split(".", 1)[-1:], class_obj.__module__.split(".", 1)[0])
        if class_module and class_module.__name__ != "DummyPipe":
            task_classes = getattr(class_module, "__all__")
        else:
            return None
    else:
        from httpx import HTTPStatusError

        try:
            # mapped_cls is defined in this module; no self-import needed.
            model_class = mapped_cls(code_name)
            if model_class is not None:
                # Convert class type to list containing the class name string
                task_classes = [model_class.__name__]
            else:
                return None
        except (OSError, HTTPStatusError) as e:
            DBUQ(f"Error mapping class {code_name}: {e}")
            return None

    return task_classes
+
+
def transformers_index():
    """Generate LLM model data for MIR index\n
    :return: Dictionary ready to be applied to MIR data fields"""

    missing_config_params = PARAMETERS

    mir_data = {}
    transformers_data: list[ClassMapEntry] = find_transformers_classes()
    for entry in transformers_data:
        repo_path = get_repo_from_class_map(entry)
        # Hand-maintained overrides for configs whose docstrings lack usable data.
        # (Debug `print(entry)` statements for "bert" removed.)
        if config := missing_config_params.get(entry.name, {}):
            entry.config_params = config.get("params", entry.config_params)
            repo_path = config.get("repo_path", repo_path)
        if not repo_path:
            raise ValueError(f"Unable to determine repo from {entry}")
        if entry.config_params:
            mir_series, mir_comp, mir_suffix = mir_tag_from_config(entry, repo_path)

            repo_path = migrations(repo_path)
            tk_pkg = {}
            tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
            if isinstance(tokenizer_classes, str):
                tokenizer_classes = [tokenizer_classes]
            if tokenizer_classes:
                # Index counts only resolvable tokenizers, so keep a manual counter.
                index = 0
                for tokenizer in tokenizer_classes:
                    if tokenizer:
                        tokenizer_class = import_object_named(tokenizer, "transformers")
                        tk_pkg.setdefault(index, {"transformers": f"{tokenizer_class.__module__}.{tokenizer_class.__name__}"})
                        index += 1
            if tk_pkg:
                # setdefault(...).update(...) replaces the original
                # get(key, setdefault(key, {})) contortion -- same behavior.
                mir_data.setdefault("info.encoder.tokenizer", {}).update(
                    {
                        mir_suffix: {
                            "pkg": tk_pkg,
                        }
                    },
                )
            mir_data.setdefault(
                mir_series,
                {
                    mir_comp: {
                        "repo": repo_path,
                        "pkg": {
                            0: {"transformers": entry.model_name},
                        },
                    },
                },
            )
    return mir_data
diff --git a/mir/generate/write_to_mir.py b/mir/generate/write_to_mir.py
new file mode 100644
index 0000000..4976502
--- /dev/null
+++ b/mir/generate/write_to_mir.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from mir.spec import mir_entry
+from mir import NFO
+from mir.maid import MIRDatabase
+
+
def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
    """Add generated HF Hub model entries to the MIR database.

    :param new_data: Mapping of dotted series tags (``domain.arch.series``)
        to ``{compatibility: entry_fields}`` dictionaries
    :param mir_db: MIRDatabase instance the entries are added to
    """
    for series, compatibilities in new_data.items():
        # The series tag encodes domain / architecture / series as dot segments
        id_segment = series.split(".")
        for compatibility in compatibilities:
            try:
                mir_db.add(
                    mir_entry(
                        domain=id_segment[0],
                        arch=id_segment[1],
                        series=id_segment[2],
                        comp=compatibility,
                        **compatibilities[compatibility],
                    ),
                )
            except IndexError:  # series tag had fewer than three segments
                NFO(f"Failed to create series: {series} compatibility: {compatibilities} ")
diff --git a/mir/indexers.py b/mir/indexers.py
deleted file mode 100644
index 573d877..0000000
--- a/mir/indexers.py
+++ /dev/null
@@ -1,319 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-"""類發現和拆卸"""
-# pylint:disable=no-name-in-module
-
-import sys
-from typing import Any, Callable
-
-from mir.config.console import nfo
-from mir.config.constants import ClassMapEntry, extract_init_parameters
-from mir.config.conversion import get_repo_from_class_map, import_submodules
-from mir.doc_parser import parse_docs, DocParseData
-from mir.tag import mir_prefix_from_forward_pass, mir_tag_from_config, tag_model_from_repo
-
-if "pytest" in sys.modules:
- import diffusers # noqa # pyright:ignore[reportMissingImports] # pylint:disable=unused-import
-
-
-def check_migrations(repo_path: str):
- """Replaces old organization names in repository paths with new ones.\n
- :param repo_path: Original repository path containing old organization names
- :return: Updated repository path with new organization names"""
- import os
-
- from mir.config.json_io import read_json_file
-
- root_folder = os.path.dirname(__file__)
- migration_file = os.path.join(os.path.join(root_folder, "spec", "repo_migrations.json"))
- repo_migrations = read_json_file(migration_file)
- for old_name, new_name in repo_migrations.items():
- if old_name in repo_path:
- repo_path = repo_path.replace(old_name, new_name)
- return repo_path
-
-
-def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
- """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
- :param repo_path (str): Repository path.
- :param model_class_obj (str): The model class function
- :raises TypeError: If 'repo_path' or 'class_name' are not set.
- :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
- """
- import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
-
- control_net = ["Control", "Controlnet"] #
- mir_prefix = "info"
- if hasattr(diffusers, class_name):
- model_class_obj = getattr(diffusers, class_name)
- sub_segments = extract_init_parameters(model_class_obj, "diffusers")
- decoder = "decoder" in sub_segments
- if repo_path in ["kandinsky-community/kandinsky-3"]:
- mir_prefix = "info.unet"
- if repo_path in ["openai/shap-e"]:
- mir_prefix = "info.unet"
- class_name = "ShapEPipeline"
- elif class_name == "MotionAdapter":
- mir_prefix = "info.lora"
- elif class_name == "WanPipeline":
- mir_prefix = "info.dit"
- elif class_name == "CogVideoXVideoToVideoPipeline":
- class_name = "CogVideoXPipeline"
- elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
- mir_prefix = "info.controlnet"
- else:
- mir_prefix = mir_prefix_from_forward_pass(**sub_segments)
- if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
- nfo(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
- else:
- mir_prefix = "info." + mir_prefix
- if class_name == "StableDiffusion3InpaintPipeline" or repo_path in ["stabilityai/stable-diffusion-3-medium-diffusers"]:
- class_name = "StableDiffusion3Pipeline"
- repo_path = "stabilityai/stable-diffusion-3.5-medium"
- if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
- class_name = "HunyuanVideoPipeline"
- mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
- mir_series = mir_prefix + "." + mir_series
- repo_path = check_migrations(repo_path)
- # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
- prefixed_data = {
- "repo": repo_path,
- "pkg": {0: {"diffusers": class_name}},
- # "mode": modalities.get("mode"),
- }
- return mir_series, {mir_comp: prefixed_data}
-
-
-def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
- """Generate diffusion model data for MIR index\n
- :return: Dictionary ready to be applied to MIR data fields
- """
- special_repos = {
- "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-dev",
- # "stabilityai/stable-diffusion-3-medium-diffusers": "stabilityai/stable-diffusion-3.5-medium",
- }
- special_classes = {
- # "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium", # NOT sd3
- "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
- "ChromaPipeline": "lodestones/Chroma",
- }
- from mir.inspect.metadata import find_diffusers_docstrings
-
- extracted_docstrings = find_diffusers_docstrings()
- model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
- pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
-
- for extracted in model_info:
- parsed_data: DocParseData = parse_docs(extracted.doc_string)
- if parsed_data is None:
- print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
- continue
- for class_name, swap_repo in special_classes.items():
- if parsed_data.pipe_class == class_name:
- parsed_data.pipe_repo = swap_repo
- break
- model_class_obj = import_submodules(parsed_data.pipe_class, f"diffusers.pipelines.{extracted.package_name}.{extracted.file_name}")
- if not model_class_obj:
- continue
- extract_init_parameters(model_class_obj)
- try:
- series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
- except TypeError:
- pass # Attempt 1
- if pipe_data.get(series):
- if "img2img" in parsed_data.pipe_class.lower():
- continue
- pipe_data.setdefault(series, {}).update(comp_data)
- special_conditions = special_repos | special_classes
- if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
- test = special_conditions.get(parsed_data.pipe_repo)
- if test:
- staged_repo = test
- parsed_data.staged_class = parsed_data.pipe_class
- try:
- series, comp_data = create_pipe_entry(
- staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
- parsed_data.staged_class #
- if parsed_data.staged_class
- else parsed_data.pipe_class,
- )
- except TypeError as error_log:
- nfo(series, comp_data)
- nfo(error_log)
- continue # Attempt 2,
- pipe_data.setdefault(series, {}).update(comp_data)
- return dict(pipe_data)
-
-
-def transformers_index():
- """Generate LLM model data for MIR index\n
- :return: Dictionary ready to be applied to MIR data fields"""
-
- import os
-
- from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
-
- from mir.config.json_io import read_json_file
-
- root_folder = os.path.dirname(__file__)
- params_file = os.path.join(os.path.join(root_folder, "spec", "missing_params.json"))
- missing_config_params = read_json_file(params_file)
- from mir.inspect.metadata import map_transformers_classes
-
- mir_data = {}
- transformers_data: list[ClassMapEntry] = map_transformers_classes()
- for entry in transformers_data:
- repo_path = get_repo_from_class_map(entry)
- if config := missing_config_params.get(entry.name, {}):
- entry.config_params = config.get("params", entry.config_params)
- if not repo_path or entry.name == "gpt_oss":
- repo_path = config["repo_path"]
- if not repo_path:
- raise ValueError(f"Unable to determine repo from {entry}")
- if entry.config_params:
- mir_series, mir_comp, mir_suffix = mir_tag_from_config(entry, repo_path)
- # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
-
- repo_path = check_migrations(repo_path)
- tk_pkg = {}
- tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
- if isinstance(tokenizer_classes, str):
- tokenizer_classes = [tokenizer_classes]
- # mode = modalities.get("mode")
- if tokenizer_classes:
- index = 0
- for tokenizer in tokenizer_classes:
- if tokenizer:
- tokenizer_class = import_submodules(tokenizer, "transformers")
- tk_pkg.setdefault(index, {"transformers": f"{tokenizer_class.__module__}.{tokenizer_class.__name__}"})
- index += 1
- if tk_pkg:
- mir_data.get("info.encoder.tokenizer", mir_data.setdefault("info.encoder.tokenizer", {})).update(
- {
- mir_suffix: {
- "pkg": tk_pkg,
- }
- },
- )
- mir_data.setdefault(
- mir_series,
- {
- mir_comp: {
- "repo": repo_path,
- "pkg": {
- 0: {"transformers": entry.model_name},
- },
- # "mode": mode,
- },
- },
- )
- return mir_data
-
-
-def mlx_repo_capture(base_repo: str = "mlx-community"):
- import os
- import re
-
- try:
- import mlx_audio # type: ignore
- except ImportError:
- return {}
- result = {}
- result_2 = {}
- folder_path_named: str = os.path.dirname(mlx_audio.__file__)
- for root, dir, file_names in os.walk(folder_path_named):
- for file in file_names:
- if file.endswith((".py", ".html", ".md", ".ts")):
- with open(os.path.join(root, file), "r") as open_file:
- content = open_file.read()
- if "mlx-community/" in content:
- matches = re.findall(base_repo + r'/(.*?)"', content)
- for match in matches:
- result[match] = f"{base_repo}/{match}"
- previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
- class_match = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
- if class_match:
- result_2[match] = {f"{base_repo}/{match}": [*class_match]}
- else:
- if os.path.basename(root) in ["tts", "sts"]:
- folder_name = match.partition("-")[0]
- file_path = os.path.join(root, "models", folder_name, folder_name + ".py")
- if os.path.exists(file_path):
- with open(file_path, "r") as model_file:
- read_data = model_file.read() # type: ignore # noqa
- class_match = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
-
- return result_2
-
-
-# def mlx_repo_capture(base_repo: str = "mlx-community"):
-# import os
-# import re
-# import mlx_audio
-
-# result = {}
-# result_2 = {}
-# folder_path_named: str = os.path.dirname(mlx_audio.__file__)
-# for root, _, file_names in os.walk(folder_path_named):
-# for file in file_names:
-# if file.endswith((".py", ".html", ".md", ".ts")):
-# with open(os.path.join(root, file), "r") as open_file:
-# content = open_file.read()
-# if "mlx-community/" in content:
-# matches = re.findall(base_repo + r'/(.*?)"', content)
-# for match in matches:
-# print(file)
-# result[match] = f"{base_repo}/{match}"
-# previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
-# matches = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
-# if matches:
-# result_2[match] = {f"{base_repo}/{match}": [*matches]}
-# else:
-# result_2[match] = {f"{base_repo}/{match}": None}
-# return result_2
-
-
-# def mlx_audio_scrape(base_repo: str = "mlx-community"):
-# import os
-# import re
-# import mlx_audio
-
-# result = {}
-# result_2 = {}
-# folder_path_named: str = os.path.dirname(mlx_audio.__file__)
-# for root, _, file_names in os.walk(folder_path_named):
-# for file in file_names:
-# if file.endswith((".py",)):
-# with open(os.path.join(root, file), "r") as open_file:
-# content = open_file.read()
-# if "mlx-community/" in content:
-# matches = re.findall(base_repo + r'/(.*?)"', content)
-# for match in matches:
-# result[match] = f"{base_repo}/{match}"
-# previous_data = content[content.index(match) - 75 : content.index(match)].replace(base_repo, "")
-# matches = re.findall(r"(\w+)\.from_pretrained", previous_data, re.MULTILINE)
-# if len(matches) > 1:
-# result_2[match] = {f"{base_repo}/{match}": [*matches]}
-# else:
-# if "nn.Module" in content:
-# previous_data = content[content.rindex("nn.Module") - 50 : content.rindex("nn.Module")]
-# matches = re.search(r"(\w+)\.", previous_data, re.MULTILINE)
-# result_2[match] = {f"{base_repo}/{match}": [*matches]}
-# return result_2
-
-
-# @MODE_DATA.decorator
-# def add_mode_types(mir_tag: list[str], data: dict | None = None) -> dict[str, list[str] | str]:
-# """_summary_\n
-# :param mir_tag: _description_
-# :param data: _description_, defaults to None
-# :return: _description_"""
-# fused_tag = ".".join(mir_tag)
-
-# mir_details = {
-# "mode": data.get(fused_tag, {}).get("pipeline_tag"),
-# "pkg_type": data.get(fused_tag, {}).get("library_type"),
-# "tags": data.get(fused_tag, {}).get("tags"),
-# }
-# return mir_details
diff --git a/mir/inspect/classes.py b/mir/inspect/classes.py
deleted file mode 100644
index 23b955c..0000000
--- a/mir/inspect/classes.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# ###
-# ###
-
-"""類發現和拆卸"""
-
-# pylint:disable=protected-access
-
-from typing import Callable, Dict, List, Optional, Union, Type
-from mir.config.conversion import import_submodules
-from mir.config.console import nfo
-
-
-def resolve_import_path(code_name: str, pkg_name: str) -> Optional[List[str]]:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param code_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
- import os
- from importlib import import_module
-
- pkg_paths = {
- "diffusers": "pipelines",
- "transformers": "models",
- }
- folder_name = code_name.replace("-", "_")
- pkg_name = pkg_name.lower()
- folder_path = pkg_paths[pkg_name]
- package_obj = import_module(pkg_name)
- folder_path_named = [folder_path, folder_name]
- pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
- # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
- if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
- import_path = [pkg_name]
- import_path.extend(folder_path_named)
- return import_path
-
-
-def resolve_code_names(class_name: Optional[Union[str, Type]] = None, pkg_name: Optional[str] = "transformers", path_format: Optional[bool] = False) -> Union[List[str], str]:
- """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
- :param class_name: To return only one class, defaults to None
- :param pkg_name: optional field for library, defaults to "transformers"
- :param path_format: Retrieve just the code name, or the full module path and code name within the package
- :return: A list of all code names, or the one corresponding to the provided class"""
-
- package_map = {
- "diffusers": ("_import_structure", "diffusers.pipelines"),
- "transformers": ("MODEL_MAPPING_NAMES", "transformers.models.auto.modeling_auto"),
- }
- pkg_name = pkg_name.lower()
- MAPPING_NAMES = import_submodules(*package_map[pkg_name])
- if class_name:
- if isinstance(class_name, Type):
- class_name = class_name.__name__
- code_name = next(iter(key for key, value in MAPPING_NAMES.items() if class_name in str(value)), "")
- return resolve_import_path(code_name, pkg_name) if path_format else code_name.replace("_", "-")
- return list(MAPPING_NAMES)
-
-
-def extract_inherited_classes(model_class: Union[Callable, str], pkg_name: Optional[str] = None) -> Optional[Dict[str, List[str]]]:
- """Strips tags from module's base classes and extracts inherited class members.\n
- If `module` is a string, it requires the `library` argument to convert it into a callable.\n
- :param module: A module or string representing a module.
- :param library: Library name required if `module` is a string. Defaults to None.
- :returns: Mapping indices to class path segments, or None if invalid input."""
-
- if isinstance(model_class, str):
- if not pkg_name:
- nfo("Provide a library type argument to process strings")
- return None
- model_class = import_submodules(model_class, pkg_name)
- signature = model_class.__bases__
- class_names = []
- for index, class_annotation in enumerate(signature):
- tag_stripped = str(class_annotation)[8:-2]
- module_segments = tag_stripped.split(".")
- class_names.append(module_segments)
- return class_names
-
-
-# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
-# from nnll.download.hub_cache import download_hub_file
-
-# model_file = download_hub_file(
-# repo_id=f"{repo_id}/tree/main/{arch}",
-# source="huggingface",
-# file_name="diffusion_pytorch_model.safetensors.index.json",
-# local_dir=".tmp",
-# )
diff --git a/mir/inspect/metadata.py b/mir/inspect/metadata.py
deleted file mode 100644
index 613afae..0000000
--- a/mir/inspect/metadata.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from typing import Callable, Generator
-
-import diffusers
-from mir.config.constants import ClassMapEntry, DocStringEntry, extract_init_parameters
-from mir.config.conversion import retrieve_diffusers_docstrings
-
-
-# if code_name and "__" not in code_name:
-# tasks = TaskAnalyzer.show_transformers_tasks(code_name=code_name)
-# if tasks and isinstance(tasks, list): # Ensure tasks is a list
-# task_pipe = next(iter(tasks))
-# if isinstance(task_pipe, tuple):
-# task_pipe = task_pipe[0]
-# if task_pipe not in exclude_list:
-# model_class = getattr(__import__("transformers"), task_pipe) # this is done to get the path to the config
-# model_data = extract_init_params(model_class)
-# if model_data and ("inspect" not in model_data["config"]) and ("deprecated" not in list(model_data["config"])):
-# transformer_data.setdefault(model_class, model_data)
-# else:
-# model_data = None
-# # Reset task_pipe if tasks was None or not a list
-# if not tasks or not isinstance(tasks, list):
-# task_pipe = None
-
-# if not model_data and code_name not in second_exclude_list: # second attempt
-# if code_name == "donut":
-# code_name = "donut-swin"
-# if not task_pipe and code_name and MODEL_MAPPING_NAMES.get(code_name.replace("_", "-")):
-# model_class = getattr(__import__("transformers"), MODEL_MAPPING_NAMES[code_name.replace("_", "-")], None)
-# elif task_pipe:
-# model_class = getattr(__import__("transformers"), task_pipe)
-# config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("_", "-"))
-# if not config_class:
-# config_class = CONFIG_MAPPING_NAMES.get(code_name.replace("-", "_"))
-# if config_class:
-# config_class_obj = getattr(__import__("transformers"), config_class)
-# model_data = {"config": str(config_class_obj.__module__ + "." + config_class_obj.__name__).split(".")}
-# if model_data and ("inspect" not in model_data) and ("deprecated" not in model_data) and model_class:
-# transformer_data.setdefault(model_class, model_data)
-# return transformer_data
-
-
-def map_transformers_classes() -> list[ClassMapEntry]:
- """Eat the 🤗Transformers classes as a treat, leaving any tasty subclass class morsels neatly arranged as a dictionary.\n
- Nom.
- :return: Tasty mapping of subclasses to their class references"""
- from transformers.models.auto.configuration_auto import CONFIG_MAPPING
- from transformers.models.auto.modeling_auto import MODEL_MAPPING # config: model map
-
- model_data = []
- for config_name, config_obj in CONFIG_MAPPING.items():
- model_params = None
- if model_obj := MODEL_MAPPING.get(config_obj, None):
- if isinstance(model_obj, Callable):
- model_obj = (model_obj,)
- assert isinstance(model_obj, tuple)
- for model_class in model_obj:
- if model_params and ("inspect" not in model_params["config"]) and ("deprecated" not in list(model_params["config"])):
- pass
- else:
- model_params = None
- model_name = model_class.__name__
- model_data.append(
- ClassMapEntry(
- name=config_name,
- model_name=model_name.split(".")[-1],
- model=model_class, # type: ignore
- config=config_obj,
- ),
- )
- return model_data
-
-
-def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
- """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
- :return: Docstrings for common diffusers models"""
- import os
-
- from diffusers.pipelines import _import_structure
-
- from mir.config.json_io import read_json_file
-
- project_root = os.path.dirname(os.path.dirname(__file__))
- pattern_file = os.path.join(project_root, "spec", "docstring_patterns.json")
- docstring_patterns = read_json_file(pattern_file)
- exclusion_list = docstring_patterns["exclusion_list"]
- uncommon_naming = docstring_patterns["uncommon_naming"]
- for pipe_name in _import_structure.keys():
- if pipe_name not in exclusion_list:
- file_specific = uncommon_naming.get(pipe_name, pipe_name)
- if import_name := getattr(diffusers.pipelines, str(pipe_name)):
- file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
- yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
- else:
- continue
diff --git a/mir/inspect/parenting.py b/mir/inspect/parenting.py
deleted file mode 100644
index a0bfa26..0000000
--- a/mir/inspect/parenting.py
+++ /dev/null
@@ -1,32 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from typing import List, Optional
-
-
-def class_parent(code_name: str, pkg_name: str) -> Optional[List[str]]:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param code_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
- import os
- from importlib import import_module
-
- pkg_paths = {
- "diffusers": "pipelines",
- "transformers": "models",
- }
- folder_name = code_name.replace("-", "_")
- pkg_name = pkg_name.lower()
- folder_path = pkg_paths[pkg_name]
- package_obj = import_module(pkg_name)
- folder_path_named = [folder_path, folder_name]
- pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
- # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
- if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
- import_path = [pkg_name]
- import_path.extend(folder_path_named)
- return import_path
diff --git a/mir/inspect/pipes.py b/mir/inspect/pipes.py
deleted file mode 100644
index cdec5f7..0000000
--- a/mir/inspect/pipes.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from typing import List, Optional
-
-
-def show_shared_hyperparameters(parameter_filter: Optional[str] = None) -> List[str]:
- """Show all config classes in the Transformer package with the specified init annotation\n
- :param from_match: Narrow the classes to only those with an exact key inside
- :return: A list of all Classes"""
- from mir.inspect.metadata import map_transformers_classes
- from mir.config.constants import extract_init_parameters
-
- transformers_data = map_transformers_classes()
- config_data = []
- for entry in transformers_data:
- if parameter_filter:
- segments = extract_init_parameters(module=entry.config, package_name="transformers")
- if parameter_filter in list(segments):
- config_data.append(entry.config)
- else:
- config_data.append(entry.config)
- return config_data
-
-
-def get_class_parent_folder(class_name: str, pkg_name: str) -> List[str]:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system (formerly seek_class_path)\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param class_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
- from mir.config.console import dbuq
- from mir.inspect.classes import resolve_code_names, extract_init_params
-
- pkg_name = pkg_name.lower()
- if pkg_name == "diffusers":
- parent_folder: List[str] = resolve_code_names(class_name=class_name, pkg_name=pkg_name, path_format=True)
- if not parent_folder or not parent_folder[-1].strip():
- dbuq("Data not found for", " class_name = {class_name},pkg_name = {pkg_name},{parent_folder} = parent_folder")
- return None
- elif pkg_name == "transformers":
- module_path = extract_init_params(class_name, "transformers").get("config")
- parent_folder = module_path[:3]
- return parent_folder
diff --git a/mir/config/json_io.py b/mir/json_io.py
similarity index 87%
rename from mir/config/json_io.py
rename to mir/json_io.py
index 92cd60f..6248b11 100644
--- a/mir/config/json_io.py
+++ b/mir/json_io.py
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-# pylint: disable=import-outside-toplevel
-
from typing import Any
@@ -17,8 +15,6 @@ def write_json_file(folder_path_named: str, file_name: str, data: Any, mode: str
import json
import os
- from mir.config.console import dbuq
-
if ".json" not in file_name:
file_name += ".json"
document = os.path.join(folder_path_named, os.path.basename(file_name))
@@ -26,7 +22,7 @@ def write_json_file(folder_path_named: str, file_name: str, data: Any, mode: str
try:
os.remove(document)
except FileNotFoundError as error_log:
- dbuq(f"'File was detected but not found to remove: {document}.'{error_log}", exc_info=True)
+ print(f"'File was detected but not found to remove: {document}.'{error_log}")
with open(document, mode, encoding="UTF-8") as i:
json.dump(data, i, ensure_ascii=False, indent=4, sort_keys=False)
diff --git a/mir/mir.json b/mir/mir.json
deleted file mode 100644
index c897555..0000000
--- a/mir/mir.json
+++ /dev/null
@@ -1,14941 +0,0 @@
-{
- "info.controlnet.sd-controlnet-canny": {
- "*": {
- "repo": "lllyasviel/sd-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.blipdiffusion-controlnet": {
- "*": {
- "repo": "Salesforce/blipdiffusion-controlnet",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionControlNetPipeline"
- }
- }
- }
- },
- "info.controlnet.control-v11p-sd15-inpaint": {
- "*": {
- "repo": "lllyasviel/control_v11p_sd15_inpaint",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-canny-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-canny-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-depth-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-depth-sdxl-1.0-small",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.stable-diffusion-xl-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLControlNetUnionInpaintPipeline"
- }
- }
- }
- },
- "info.controlnet.controlnet-union-sdxl-1": {
- "*": {
- "repo": "xinsir/controlnet-union-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetUnionModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-canny": {
- "*": {
- "repo": "InstantX/SD3-Controlnet-Canny",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-inpainting": {
- "*": {
- "repo": "alimama-creative/SD3-Controlnet-Inpainting",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sd2-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SD2.1-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sdxl-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SDXL-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.unet.marigold-depth-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-depth-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldDepthPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-depth-v1-1"
- ]
- }
- }
- },
- "info.unet.marigold-iid-appearance-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-iid-appearance-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldIntrinsicsPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-iid-appearance-v1-1"
- ]
- }
- }
- },
- "info.unet.marigold-normals-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-normals-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldNormalsPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-normals-v1-1"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-v1-5": {
- "*": {
- "repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionPipeline"
- }
- },
- "identifiers": [
- "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
- ],
- "file_256": [
- "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
- "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
- "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
- "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
- "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
- "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
- "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
- ],
- "layer_b3": [
- "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
- "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
- "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
- "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
- ],
- "layer_256": [
- "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
- "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
- "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
- ],
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-v1-5"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ]
- }
- }
- },
- "info.unet.stable-unclip-2-1-l": {
- "*": {
- "repo": "fusing/stable-unclip-2-1-l",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "prior_tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "prior_text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "prior": [
- "PriorTransformer"
- ],
- "prior_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_normalizer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-2-1-unclip": {
- "*": {
- "repo": "stabilityai/stable-diffusion-2-1-unclip-small",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPImg2ImgPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_normalizer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-2-1-unclip"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-xl-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "denoising_end": 0.8,
- "num_inference_steps": 40,
- "output_type": "latent",
- "safety_checker": false,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
- "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
- "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
- "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
- ],
- "layer_256": [
- "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
- "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
- "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
- ],
- "layer_b3": [
- "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
- "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
- "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
- ],
- "identifiers": [
- "logit_scale",
- "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
- "add_embedding.linear_2.bias"
- ],
- "pipe_names": {}
- },
- "pony-diffusion": {
- "file_256": [
- "67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"
- ],
- "layer_256": [
- "465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"
- ],
- "layer_b3": [
- "bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"
- ]
- },
- "pony-diffusion-turbo": {
- "file_256": [
- "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587",
- "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2"
- ],
- "layer_256": [
- "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
- "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52"
- ],
- "layer_b3": [
- "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
- "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc"
- ]
- },
- "animagine-xl-4": {
- "repo": "cagliostrolab/animagine-xl-4.0",
- "file_256": [
- "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f",
- "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac",
- "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766",
- "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0"
- ],
- "layer_256": [
- "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
- "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
- "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
- "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65"
- ],
- "layer_b3": [
- "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
- "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
- "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
- "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447"
- ]
- },
- "illustrious-xl-v2": {
- "repo": "OnomaAIResearch/Illustrious-XL-v2.0",
- "file_256": [
- "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331",
- "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c",
- "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9",
- "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce",
- "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd"
- ],
- "layer_256": [
- "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
- "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
- "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
- "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
- "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10"
- ],
- "layer_b3": [
- "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
- "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
- "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
- "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
- "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c"
- ]
- },
- "playground-v2---aesthetic": {
- "repo": "playgroundai/playground-v2.5-1024px-aesthetic",
- "pkg": {
- "0": {
- "diffusers": "DiffusionPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 50,
- "guidance_scale": 3
- }
- }
- },
- "file_256": [
- "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8",
- "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992",
- "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a",
- "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864",
- "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4",
- "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a"
- ],
- "layer_256": [
- "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
- "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843",
- "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
- "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e",
- "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c"
- ],
- "layer_b3": [
- "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b",
- "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289",
- "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc",
- "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
- "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
- "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
- ],
- "pipe_names": {}
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-Vega",
- "file_256": [
- "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce",
- "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1",
- "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2"
- ],
- "layer_256": [
- "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
- "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
- "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741"
- ],
- "layer_b3": [
- "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
- "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
- "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1"
- ]
- },
- "ssd": {
- "repo": "segmind/SSD-1B",
- "file_256": [
- "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd",
- "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d",
- "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
- "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca",
- "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a"
- ],
- "layer_256": [
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ],
- "layer_b3": [
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4",
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ]
- }
- },
- "info.unet.stable-diffusion-xl-refiner-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-refiner-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLImg2ImgPipeline"
- },
- "1": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "num_inference_steps": 40,
- "denoising_end": 0.8
- }
- }
- },
- "identifiers": [
- "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
- ],
- "file_256": [
- "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
- "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
- "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
- ],
- "layer_b3": [
- "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
- "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
- "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
- ],
- "layer_256": [
- "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
- "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
- "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
- ],
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.sdxl-pix2pix-768": {
- "*": {
- "repo": "diffusers/sdxl-instructpix2pix-768",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLInstructPix2PixPipeline"
- }
- },
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.allegro": {
- "*": {
- "repo": "rhymes-ai/Allegro",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "guidance_scale": 7.5,
- "max_sequence_length": 512,
- "num_inference_steps": 100
- }
- }
- },
- "file_256": [
- "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
- ],
- "layer_b3": [
- "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
- ],
- "layer_256": [
- "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
- ]
- }
- },
- "info.dit.amused-512": {
- "*": {
- "repo": "amused/amused-512",
- "pkg": {
- "0": {
- "diffusers": "AmusedInpaintPipeline"
- }
- },
- "pipe_names": {
- "vqvae": [
- "VQModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "amused-512"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "UVit2DModel"
- ],
- "scheduler": [
- "ops.scheduler.amused",
- "scheduler"
- ]
- }
- }
- },
- "info.lora.animatediff-motion-adapter-v1-5-2": {
- "*": {
- "repo": "guoyww/animatediff-motion-adapter-v1-5-2",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffVideoToVideoPipeline"
- }
- }
- }
- },
- "info.lora.animatelcm": {
- "*": {
- "repo": "wangfuyun/AnimateLCM",
- "pkg": {
- "0": {
- "diffusers": "MotionAdapter"
- }
- }
- }
- },
- "info.lora.animatediff-motion-adapter-sdxl": {
- "*": {
- "repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffSDXLPipeline"
- }
- }
- }
- },
- "info.controlnet.animatediff-sparsectrl-scribble": {
- "*": {
- "repo": "guoyww/animatediff-sparsectrl-scribble",
- "pkg": {
- "0": {
- "diffusers": "SparseControlNetModel"
- }
- }
- }
- },
- "info.controlnet.animatelcm": {
- "*": {
- "repo": "wangfuyun/AnimateLCM",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.dit.bria-3": {
- "*": {
- "repo": "briaai/BRIA-3.2",
- "pkg": {
- "0": {
- "diffusers": "BriaPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "BriaTransformer2DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.euler",
- "discrete"
- ],
- [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "bria-3"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.flux2-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.2-dev",
- "pkg": {
- "0": {
- "diffusers": "Flux2Pipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.mistral-3-2503",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux2-dev"
- ],
- "transformer": [
- "Flux2Transformer2DModel"
- ]
- }
- }
- },
- "info.dit.flux1-schnell": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-schnell",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 0.0,
- "num_inference_steps": 4,
- "max_sequence_length": 256
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "num_inference_steps": 4
- }
- }
- },
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "file_256": [
- "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
- "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
- ],
- "layer_b3": [
- "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
- "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
- "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
- ],
- "layer_256": [
- "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
- "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
- "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
- ],
- "tasks": [
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- },
- "shuttle-3-aesthetic": {
- "repo": "shuttleai/shuttle-3.1-aesthetic",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
- "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
- ],
- "layer_256": [
- "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
- "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a"
- ],
- "layer_b3": [
- "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
- "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c"
- ]
- },
- "shuttle-3-diffusion": {
- "repo": "shuttleai/shuttle-3-diffusion",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd",
- "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755",
- "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be",
- "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8"
- ],
- "layer_256": [
- "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
- "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
- "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c"
- ],
- "layer_b3": [
- "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
- "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
- "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd"
- ]
- },
- "shuttle-jaguar": {
- "repo": "shuttleai/shuttle-jaguar",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
- "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f"
- ],
- "layer_b3": [
- "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
- "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672"
- ]
- }
- },
- "info.controlnet.flux1-canny-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Canny-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxControlPipeline"
- }
- }
- }
- },
- "info.controlnet.flux1-depth-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Depth-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxControlInpaintPipeline"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny-alpha": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-Controlnet-Canny-alpha",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.dit.flux1-fill-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Fill-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxFillPipeline"
- }
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.flux1-kontext-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Kontext-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxKontextInpaintPipeline"
- }
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.prx-512-t2i-sft": {
- "*": {
- "repo": "Photoroom/prx-512-t2i-sft",
- "pkg": {
- "0": {
- "diffusers": "PRXPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "PRXTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder": [
- "info.stst.t5gemma-prefixlm",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "prx-512-t2i-sft"
- ],
- "vae": [
- "AutoencoderKL",
- [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "NoneType"
- ]
- }
- }
- },
- "info.unet.audioldm-s-v2": {
- "*": {
- "repo": "cvssp/audioldm-s-full-v2",
- "pkg": {
- "0": {
- "diffusers": "AudioLDMPipeline"
- }
- },
- "file_256": [
- "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clap-htsat-fused",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "info.stst.speecht5-asr",
- "*"
- ]
- }
- }
- },
- "info.unet.audioldm2": {
- "*": {
- "repo": "cvssp/audioldm2",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
- ],
- "layer_b3": [
- "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
- ],
- "layer_256": [
- "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
- ]
- }
- },
- "info.unet.blipdiffusion": {
- "*": {
- "repo": "Salesforce/blipdiffusion",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "blipdiffusion"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- "ops.scheduler.pndm",
- "scheduler"
- ],
- "qformer": [
- "info.vit.blip2-opt",
- "*"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chroma": {
- "*": {
- "repo": "lodestones/Chroma",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline"
- },
- "1": {
- "generation": {
- "neg_text": "",
- "num_steps": "28",
- "latent_size": [
- 64,
- 64
- ]
- }
- }
- },
- "file_256": [
- "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
- "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
- "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
- "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
- ],
- "layer_b3": [
- "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
- ],
- "layer_256": [
- "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
- ],
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- },
- "chroma1-hd": {
- "repo": "lodestones/Chroma1-HD",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 40
- }
- }
- },
- "file_256": [
- "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546",
- "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7",
- "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc"
- ]
- },
- "chroma1-flash": {
- "repo": "lodestones/Chroma1-Flash",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.0,
- "num_images_per_prompt": 1
- }
- }
- },
- "file_256": [
- "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
- "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
- "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
- ],
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chroma1-hd": {
- "*": {
- "repo": "lodestones/Chroma1-HD",
- "pkg": {
- "0": {
- "diffusers": "ChromaImg2ImgPipeline"
- }
- },
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma1-hd"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.cogvideox": {
- "*": {
- "repo": "zai-org/CogVideoX-2b",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_videos_per_prompt": 1,
- "num_inference_steps": 50,
- "num_frames": 49,
- "guidance_scale": 6
- }
- }
- },
- "file_256": [
- "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
- ],
- "layer_b3": [
- "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
- ],
- "layer_256": [
- "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
- ]
- }
- },
- "info.controlnet.cogvideox-fun-v-pose": {
- "*": {
- "repo": "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXFunControlPipeline"
- }
- }
- }
- },
- "info.dit.cogvideox-i2v": {
- "*": {
- "repo": "zai-org/CogVideoX-5b-I2V",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogvideox-i2v"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "info.vae.cogvideox",
- "cogvideox-i2v"
- ],
- "transformer": [
- "CogVideoXTransformer3DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- }
- }
- },
- "info.dit.cogview3": {
- "*": {
- "repo": "zai-org/CogView3-Plus-3B",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "guidance_scale": 7.0,
- "num_images_per_prompt": 1,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- }
- }
- }
- },
- "info.dit.cogview4": {
- "*": {
- "repo": "zai-org/CogView4-6B",
- "pkg": {
- "0": {
- "diffusers": "CogView4Pipeline"
- }
- },
- "tasks": [
- "CogView4ControlPipeline",
- "CogView4Pipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogview4"
- ],
- "text_encoder": [
- "info.stst.glm-4-chat",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "CogView4Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.controlnet.cogview4-control": {
- "*": {
- "repo": "zai-org/CogView4-6B-Control",
- "pkg": {
- "0": {
- "diffusers": "CogView4ControlPipeline"
- }
- }
- }
- },
- "info.dit.pre-trianed": {
- "*": {
- "repo": "model_id, revision=\"diffusers/base/pre-trianed",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2_5_PredictBasePipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "pre-trianed"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-predict2-text2image": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Text2Image",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2TextToImagePipeline"
- }
- },
- "file_256": [
- "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
- "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
- "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
- "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
- "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
- "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
- "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
- "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
- "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
- "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
- "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
- "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
- "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
- ],
- "layer_b3": [
- "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
- "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
- "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
- "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
- "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
- "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
- "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
- "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
- "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
- "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
- "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
- "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
- "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
- ],
- "layer_256": [
- "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
- "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
- "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
- "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
- "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
- "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
- "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
- "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
- "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
- "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
- "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
- "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
- "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
- ],
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-text2image"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-predict2-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2VideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-text2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosTextToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-text2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosVideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.unet.if-ii-l-v1": {
- "*": {
- "repo": "DeepFloyd/IF-II-L-v1.0",
- "pkg": {
- "0": {
- "diffusers": "IFSuperResolutionPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "if-ii-l-v1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ]
- }
- }
- },
- "info.dit.easyanimatev5-zh": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimatePipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- [
- "info.art.bert-uncased",
- "*"
- ]
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.controlnet.easyanimatev5-zh-control": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateControlPipeline"
- }
- }
- }
- },
- "info.dit.easyanimatev5-zh-inp": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateInpaintPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- [
- "info.art.bert-uncased",
- "*"
- ]
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh-inp"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.hidream-i1": {
- "*": {
- "repo": "HiDream-ai/HiDream-I1-Full",
- "pkg": {
- "0": {
- "diffusers": "HiDreamImagePipeline"
- }
- },
- "file_256": [
- "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
- ],
- "layer_b3": [
- "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
- ],
- "layer_256": [
- "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_3": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_3": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_4": [
- "info.stst.llama-2-hf",
- "*"
- ],
- "tokenizer_4": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuandit-v1": {
- "diffusers": {
- "repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "extra_embedder",
- "model.blocks",
- "skip_norm.weight"
- ],
- "file_256": [
- "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
- "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
- "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
- ],
- "layer_b3": [
- "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
- "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
- "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
- ],
- "layer_256": [
- "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
- "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
- "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
- ]
- }
- },
- "info.dit.hunyuanvideo": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoPipeline"
- }
- },
- "file_256": [
- "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
- ],
- "layer_b3": [
- "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
- ],
- "layer_256": [
- "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
- ],
- "pipe_names": {
- "text_encoder": [
- "info.stst.llama-2-hf",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-i2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-I2V",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.llava",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-1-480p-t2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_t2v",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideo15Pipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-t2v"
- ],
- "transformer": [
- "HunyuanVideo15Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-t2v"
- ],
- "guider": [
- "ClassifierFreeGuidance"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-1-480p-i2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_i2v",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideo15ImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-i2v"
- ],
- "transformer": [
- "HunyuanVideo15Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-i2v"
- ],
- "guider": [
- "ClassifierFreeGuidance"
- ],
- "image_encoder": [
- "SiglipVisionModel"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanimage-2": {
- "diffusers": {
- "repo": "hunyuanvideo-community/HunyuanImage-2.1-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "HunyuanImagePipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanimage-2"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanimage-2"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanimage-2-refiner": {
- "diffusers": {
- "repo": "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "HunyuanImageRefinerPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanimage-2-refiner"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.kandinsky-2-1": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-1-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyPriorPipeline"
- }
- },
- "tasks": [
- "Kandinsky3Img2ImgPipeline",
- "Kandinsky3Pipeline",
- "KandinskyCombinedPipeline",
- "KandinskyImg2ImgCombinedPipeline",
- "KandinskyImg2ImgPipeline",
- "KandinskyInpaintCombinedPipeline",
- "KandinskyInpaintPipeline",
- "KandinskyPipeline",
- "KandinskyV22CombinedPipeline",
- "KandinskyV22Img2ImgCombinedPipeline",
- "KandinskyV22Img2ImgPipeline",
- "KandinskyV22InpaintCombinedPipeline",
- "KandinskyV22InpaintPipeline",
- "KandinskyV22Pipeline"
- ],
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-1"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.kandinsky-2-2": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-2-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyV22PriorPipeline"
- }
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-2"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.latte-1": {
- "*": {
- "repo": "maxin-cn/Latte-1",
- "pkg": {
- "0": {
- "diffusers": "LattePipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "latte-1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "LatteTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.ltx-video": {
- "*": {
- "repo": "Lightricks/LTX-Video",
- "pkg": {
- "0": {
- "diffusers": "LTXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.ltx-video-09": {
- "*": {
- "repo": "Lightricks/LTX-Video-0.9.5",
- "pkg": {
- "0": {
- "diffusers": "LTXConditionPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video-09"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.lumina-next-sft": {
- "diffusers": {
- "repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
- "pkg": {
- "0": {
- "precision": " ops.precision.bfloat.B16"
- }
- },
- "identifiers": [
- "time_caption",
- "feed_forward"
- ],
- "file_256": [
- "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
- ],
- "layer_b3": [
- "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
- "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
- ],
- "layer_256": [
- "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
- "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
- ]
- }
- },
- "info.dit.lumina-image-2": {
- "*": {
- "repo": "Alpha-VLLM/Lumina-Image-2.0",
- "pkg": {
- "0": {
- "diffusers": "Lumina2Pipeline"
- }
- },
- "file_256": [
- "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
- "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
- ],
- "layer_b3": [
- "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
- ],
- "layer_256": [
- "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
- ],
- "tasks": [
- "Lumina2Pipeline"
- ],
- "pipe_names": {
- "transformer": [
- "Lumina2Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "lumina-image-2"
- ]
- }
- },
- "illustrious-lumina-v3": {
- "repo": "OnomaAIResearch/Illustrious-Lumina-v0.03",
- "file_256": [
- "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8",
- "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214"
- ],
- "layer_256": [
- "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
- "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828"
- ],
- "layer_b3": [
- "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
- "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c"
- ]
- }
- },
- "info.dit.lucy-edit-dev": {
- "*": {
- "repo": "decart-ai/Lucy-Edit-Dev",
- "pkg": {
- "0": {
- "diffusers": "LucyEditPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "lucy-edit-dev"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.longcat-image": {
- "*": {
- "repo": "meituan-longcat/LongCat-Image",
- "pkg": {
- "0": {
- "diffusers": "LongCatImagePipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "longcat-image"
- ],
- "text_processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.longcat-image-edit": {
- "*": {
- "repo": "meituan-longcat/LongCat-Image-Edit",
- "pkg": {
- "0": {
- "diffusers": "LongCatImageEditPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "longcat-image-edit"
- ],
- "text_processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.mochi-1": {
- "*": {
- "repo": "genmo/mochi-1-preview",
- "pkg": {
- "0": {
- "diffusers": "MochiPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "mochi-1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "mochi-1"
- ],
- "transformer": [
- "MochiTransformer3DModel"
- ]
- }
- }
- },
- "info.unet.musicldm": {
- "*": {
- "repo": "ucsd-reach/musicldm",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ]
- }
- },
- "info.dit.omnigen-v1": {
- "diffusers": {
- "repo": "Shitao/OmniGen-v1-diffusers",
- "pkg": {
- "0": {
- "diffusers": "OmniGenPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "OmniGenTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "omnigen-v1"
- ]
- }
- }
- },
- "info.dit.ovis-image": {
- "*": {
- "repo": "AIDC-AI/Ovis-Image-7B",
- "pkg": {
- "0": {
- "diffusers": "OvisImagePipeline"
- }
- },
- "tasks": [
- "OvisImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.qwen3",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ovis-image"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.visualclozepipeline-384": {
- "*": {
- "repo": "VisualCloze/VisualClozePipeline-384",
- "pkg": {
- "0": {
- "diffusers": "VisualClozeGenerationPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- }
- }
- },
- "info.lora.pia-condition-adapter": {
- "*": {
- "repo": "openmmlab/PIA-condition-adapter",
- "pkg": {
- "0": {
- "diffusers": "PIAPipeline"
- }
- }
- }
- },
- "info.dit.pixart-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtAlphaPipeline"
- }
- },
- "identifiers": [
- "aspect_ratio",
- "y_embedding",
- "emb.resolution",
- "caption_projection"
- ],
- "file_256": [
- "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
- ],
- "layer_b3": [
- "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
- ],
- "layer_256": [
- "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
- ],
- "tasks": [
- "PixArtAlphaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-xl-2-1024-ms"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
- }
- },
- "info.dit.pixart-sigma-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtSigmaPipeline"
- }
- },
- "identifiers": [
- "adaln_single",
- "scale_shift_table"
- ],
- "file_256": [
- "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
- "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
- ],
- "layer_b3": [
- "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
- "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
- ],
- "layer_256": [
- "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
- "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
- ],
- "tasks": [
- "PixArtAlphaPipeline",
- "PixArtSigmaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-sigma-xl-2-1024-ms"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.sana-1024px-bf16": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
- "pkg": {
- "0": {
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 4.5,
- "num_inference_steps": 20
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
- ],
- "layer_b3": [
- "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
- ],
- "layer_256": [
- "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
- ]
- }
- },
- "info.controlnet.sana-1024px-controlnet": {
- "diffusers": {
- "repo": "ishan24/Sana_600M_1024px_ControlNetPlus_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaControlNetPipeline"
- }
- }
- }
- },
- "info.dit.sana-sprint-1024px": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaSprintPipeline"
- }
- },
- "tasks": [
- "SanaPAGPipeline",
- "SanaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-sprint-1024px"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "vae": [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "transformer": [
- "SanaTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
- }
- },
- "info.dit.sana-video": {
- "*": {
- "repo": "Efficient-Large-Model/SANA-Video_2B_480p_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-video"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "vae": [
- [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- [
- "info.vae.kl",
- "audioldm-s-v2"
- ]
- ],
- "transformer": [
- "SanaVideoTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.unet.shap-e": {
- "*": {
- "repo": "openai/shap-e",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 64,
- "size": 256,
- "guidance_scale": 15
- }
- }
- }
- }
- },
- "info.dit.stable-audio-open-1": {
- "*": {
- "repo": "stabilityai/stable-audio-open-1.0",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_end_in_s": 10,
- "num_waveforms_per_prompt": 3
- }
- }
- }
- }
- },
- "info.unet.stable-cascade": {
- "prior": {
- "repo": "stabilityai/stable-cascade-prior",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "negative_prompt": "",
- "num_images_per_prompt": 1,
- "num_inference_steps": 20,
- "guidance_scale": 4.0,
- "width": 1024,
- "height": 1024
- }
- }
- },
- "file_256": [
- "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
- "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
- "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
- "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
- "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
- "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
- ],
- "layer_b3": [
- "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
- "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
- "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
- "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
- "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
- "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
- ],
- "layer_256": [
- "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
- "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
- "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
- "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
- "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
- "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
- ],
- "identifiers": [
- "down_blocks.0.2.kv_mapper",
- "previewer",
- "backbone"
- ]
- },
- "decoder": {
- "pkg": {
- "0": {
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 0.0,
- "output_type": "pil",
- "num_inference_steps": 10
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
- "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
- "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
- "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
- "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
- ],
- "layer_b3": [
- "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
- "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
- "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
- "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
- "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
- ],
- "layer_256": [
- "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
- "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
- "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
- "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
- "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
- ],
- "identifiers": [
- "0.2.channelwise",
- "clip_mapper.bias",
- ".12.self_attn.k_proj.weight"
- ]
- }
- },
- "info.dit.auraflow": {
- "*": {
- "repo": "fal/AuraFlow",
- "pkg": {
- "0": {
- "diffusers": "AuraFlowPipeline"
- }
- },
- "identifiers": [
- [
- 8192,
- 3072
- ],
- "mlpX.c_fc2.weight",
- "joint_transformer_blocks.2.ff_context.linear_2.weight"
- ],
- "file_256": [
- "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
- "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
- "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
- "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
- ],
- "layer_b3": [
- "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
- "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
- "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
- "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
- ],
- "layer_256": [
- "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
- "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
- "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
- "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
- ],
- "tasks": [
- "AuraFlowPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "auraflow"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "AuraFlowTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.stable-diffusion-3": {
- "*": {
- "repo": "stabilityai/stable-diffusion-3.5-medium",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "model.diffusion_model.joint_blocks.",
- "transformer_blocks.21.norm1_context.linear.weight",
- "transformer_blocks.31.norm1_context.linear.weight",
- "blocks.11.ff.net.2.weight"
- ],
- "file_256": [
- "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
- "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
- ],
- "layer_b3": [
- "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
- "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
- ],
- "layer_256": [
- "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
- "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
- ]
- },
- "stable-diffusion-3-turbo": {
- "repo": "tensorart/stable-diffusion-3.5-medium-turbo",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.5,
- "height": 1024,
- "width": 768
- }
- }
- },
- "file_256": [
- "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246",
- "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d",
- "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b",
- "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e"
- ],
- "layer_256": [
- "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ],
- "layer_b3": [
- "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ]
- }
- },
- "info.unet.gligen-1-4-inpainting-text-box": {
- "*": {
- "repo": "masterful/gligen-1-4-inpainting-text-box",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENPipeline"
- }
- }
- }
- },
- "info.unet.gligen-inpainting-text-image": {
- "*": {
- "repo": "anhnct/Gligen_Inpainting_Text_Image",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENTextImagePipeline"
- }
- }
- }
- },
- "info.unet.stable-video-diffusion-img2vid-xt": {
- "*": {
- "repo": "stabilityai/stable-video-diffusion-img2vid-xt",
- "pkg": {
- "0": {
- "diffusers": "StableVideoDiffusionPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "unet": [
- "UNetSpatioTemporalConditionModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.ldm3d-4c": {
- "*": {
- "repo": "Intel/ldm3d-4c",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionLDM3DPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ldm3d-4c"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.i2vgen-xl": {
- "*": {
- "repo": "ali-vilab/i2vgen-xl",
- "pkg": {
- "0": {
- "diffusers": "I2VGenXLPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "i2vgen-xl"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "unet": [
- "I2VGenXLUNet"
- ],
- "scheduler": [
- "ops.scheduler.ddim",
- "scheduler"
- ]
- }
- }
- },
- "info.unet.wuerstchen": {
- "prior": {
- "repo": "warp-ai/wuerstchen-prior",
- "pkg": {
- "0": {
- "diffusers": "WuerstchenPriorPipeline"
- }
- },
- "tasks": [
- "WuerstchenCombinedPipeline",
- "WuerstchenDecoderPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wuerstchen"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "prior": [
- "WuerstchenPrior"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ]
- }
- }
- },
- "info.dit.wan2-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
- "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
- "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
- "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
- "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
- "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
- "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
- "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
- ],
- "layer_b3": [
- "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
- "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
- "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
- "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
- "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
- "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
- "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
- ],
- "layer_256": [
- "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
- "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
- "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
- "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
- "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
- "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
- "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
- ]
- }
- },
- "info.dit.wan-animate": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.2-Animate-14B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanAnimatePipeline"
- }
- },
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan-animate"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "WanAnimateTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan2-i2v-480p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanImageToVideoPipeline"
- }
- },
- "file_256": [
- "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
- "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
- "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
- "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
- "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
- "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
- "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
- "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
- "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
- ],
- "layer_b3": [
- "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
- "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
- "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
- "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
- "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
- ],
- "layer_256": [
- "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
- "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
- "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
- "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
- "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
- ],
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan2-i2v-480p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.wan",
- "wan2-i2v-480p"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "WanTransformer3DModel"
- ],
- "transformer_2": [
- "WanTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan21-vace": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanVACEPipeline"
- }
- },
- "file_256": [
- "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
- "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
- "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
- "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
- "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
- ],
- "layer_b3": [
- "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
- "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
- "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
- "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
- ],
- "layer_256": [
- "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
- "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
- "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
- "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
- ],
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-vace"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "transformer": [
- "WanVACETransformer3DModel"
- ],
- "transformer_2": [
- "WanVACETransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan21-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-t2v"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-t2v-lite-sft-5s": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-T2V-Lite-sft-5s-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5T2VPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2v-lite-sft-5s"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2v-lite-sft-5s"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-i2i-lite-sft": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5I2IPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2i-lite-sft"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2i-lite-sft"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-i2v-sft-5s": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5I2VPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2v-sft-5s"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2v-sft-5s"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-t2i-lite-sft": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5T2IPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2i-lite-sft"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2i-lite-sft"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.z-image-turbo": {
- "*": {
- "repo": "Z-a-o/Z-Image-Turbo",
- "pkg": {
- "0": {
- "diffusers": "ZImageOmniPipeline"
- }
- },
- "tasks": [
- "ZImageControlNetInpaintPipeline",
- "ZImageControlNetPipeline",
- "ZImageImg2ImgPipeline",
- "ZImageOmniPipeline",
- "ZImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "PreTrainedModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "z-image-turbo"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "siglip": [
- "info.vit.siglip2-patch16-224",
- "*"
- ],
- "siglip_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.controlnet.z-image-turbo": {
- "*": {
- "repo": "Tongyi-MAI/Z-Image-Turbo",
- "pkg": {
- "0": {
- "diffusers": "ZImageControlNetInpaintPipeline"
- }
- }
- }
- },
- "info.dit.skyreels-v2-t2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2Pipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-t2v-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-df-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-DF-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-df-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-i2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-I2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2ImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-i2v-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_processor": [
- "CLIPProcessor"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.qwen-image": {
- "*": {
- "repo": "Qwen/Qwen-Image",
- "pkg": {
- "0": {
- "diffusers": "QwenImageInpaintPipeline"
- }
- },
- "file_256": [
- "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
- ],
- "layer_b3": [
- "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
- ],
- "layer_256": [
- "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
- ],
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.controlnet.qwen-image-controlnet-union": {
- "*": {
- "repo": "InstantX/Qwen-Image-ControlNet-Union",
- "pkg": {
- "0": {
- "diffusers": "QwenImageControlNetModel"
- }
- }
- }
- },
- "info.controlnet.qwen-image-controlnet-inpainting": {
- "*": {
- "repo": "InstantX/Qwen-Image-ControlNet-Inpainting",
- "pkg": {
- "0": {
- "diffusers": "QwenImageControlNetModel"
- }
- }
- }
- },
- "info.dit.qwen-image-edit": {
- "*": {
- "repo": "Qwen/Qwen-Image-Edit",
- "pkg": {
- "0": {
- "diffusers": "QwenImageEditInpaintPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-edit"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.qwen-image-edit-2509": {
- "*": {
- "repo": "Qwen/Qwen-Image-Edit-2509",
- "pkg": {
- "0": {
- "diffusers": "QwenImageEditPlusPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-edit-2509"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.qwen-image-layered": {
- "*": {
- "repo": "Qwen/Qwen-Image-Layered",
- "pkg": {
- "0": {
- "diffusers": "QwenImageLayeredPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-layered"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chronoedit": {
- "diffusers": {
- "repo": "nvidia/ChronoEdit-14B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "ChronoEditPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "chronoedit"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "transformer": [
- "ChronoEditTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.unet.kolors": {
- "diffusers": {
- "repo": "Kwai-Kolors/Kolors-diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 5.0,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
- ],
- "layer_b3": [
- "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
- ],
- "layer_256": [
- "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
- ],
- "identifiers": [
- ".DenseReluDense.wi.weight",
- "encoder_hid_proj.weight"
- ],
- "pipe_names": {}
- }
- },
- "info.moe.trinity": {
- "*": {
- "repo": "arcee-ai/Trinity-Mini",
- "pkg": {
- "0": {
- "transformers": "AfmoeModel"
- }
- },
- "tasks": [
- "AfmoeForCausalLM",
- "AfmoeModel",
- "AfmoePreTrainedModel"
- ]
- }
- },
- "info.encoder.tokenizer": {
- "aimv2-patch14-224-lit": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "albert-xx-v2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "align": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "afm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "aria": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "audio-flamingo-3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "aya-vision": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
- }
- }
- },
- "bark": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "bart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "bert-for-seq-generation-l-24-bbc-encoder": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert_generation.tokenization_bert_generation.BertGenerationTokenizer"
- }
- }
- },
- "bigbird-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.big_bird.tokenization_big_bird.BigBirdTokenizer"
- }
- }
- },
- "bigbird-pegasus-arxiv": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "biogpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.biogpt.tokenization_biogpt.BioGptTokenizer"
- }
- }
- },
- "bitnet-b18-4t": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "blenderbot": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer"
- }
- }
- },
- "blip-vqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "blip2-opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "bloom": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "blt": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "bridgetower": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "bros-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "camembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.camembert.tokenization_camembert.CamembertTokenizer"
- }
- }
- },
- "canine-s": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.canine.tokenization_canine.CanineTokenizer"
- }
- }
- },
- "chameleon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "chinese-clip-vit-patch16": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "clap-htsat-fused": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "clip-vit-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "clipseg-rd64": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "clvp-dev": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clvp.tokenization_clvp.ClvpTokenizer"
- }
- }
- },
- "llama-2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "codegen-mono": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "c4ai-command-r-v01": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
- }
- }
- },
- "conv-bert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "cpm-ant": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cpmant.tokenization_cpmant.CpmAntTokenizer"
- }
- }
- },
- "csm": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "ctrl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer"
- }
- }
- },
- "data2vec-audio-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "data2vec-text": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "dbrx": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "deberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta.tokenization_deberta.DebertaTokenizer"
- }
- }
- },
- "deberta-v2-x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2.DebertaV2Tokenizer"
- }
- }
- },
- "deepseek-v2-lite": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "deepseek-v3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "deepseek-vl-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "dia": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.dia.tokenization_dia.DiaTokenizer"
- }
- }
- },
- "diffllama-handcut": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "distilbert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "dpr-ctx-encoder-single-nq": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
- }
- }
- },
- "electra-discriminator": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "emu3-chat-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "ernie-3-zh": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "ernie-45-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "ernie-4-a-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "ernie-4-vl-a-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "esm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
- }
- }
- },
- "exaone-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "falcon": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "falcon-mamba": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "flaubert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
- }
- }
- },
- "flava": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "flexolmo-7x-1t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "florence-2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "fnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
- }
- }
- },
- "wmt19-en-ru": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
- }
- }
- },
- "funnel": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.funnel.tokenization_funnel.FunnelTokenizer"
- }
- }
- },
- "fuyu": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma-3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma3-text": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma-3n-e": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "git": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "glm-4-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4-0414": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4v-thinking": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4v": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-asr-nano-2512": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "got-ocr-2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gpt2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-bigcode-santacoder": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-neo": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-neox": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "gpt-neox-japanese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer"
- }
- }
- },
- "gpt-oss": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gpt-j": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "granite": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "powermoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "granite-4-h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "moe-active-shared-experts": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "grounding-dino": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "groupvit-gcc-yfcc": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "helium": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "hubert-ls960": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "ibert-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "idefics": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "idefics2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "idefics3-llama3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "blip-flan-t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "internvl3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "jais-2-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "jamba-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "janus": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "jetmoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "kosmos-2-patch14-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "kosmos-2": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "todo": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.parakeet.tokenization_parakeet_fast.ParakeetTokenizerFast"
- }
- }
- },
- "layoutlm-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "layoutlmv2-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer"
- }
- }
- },
- "layoutlmv3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
- }
- }
- },
- "led-16384": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "lfm2-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "lilt-roberta-en": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "llama-4-scout-16e": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-v1-mistral-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-next-video-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-onevision-qwen2-ov-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "longformer-4096": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "long-t5-local": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "luke": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.luke.tokenization_luke.LukeTokenizer"
- }
- }
- },
- "lxmert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "m": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.m2m_100.tokenization_m2m_100.M2M100Tokenizer"
- }
- }
- },
- "mamba": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "mamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "opus-mt-en-de": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.marian.tokenization_marian.MarianTokenizer"
- }
- }
- },
- "mbart-cc25": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mbart.tokenization_mbart.MBartTokenizer"
- }
- }
- },
- "megatron-bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "metaclip-2-worldwide-huge-quickgelu": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "mgp-str": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mgp_str.tokenization_mgp_str.MgpstrTokenizer"
- }
- }
- },
- "max-text-01-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "stral-3-2512": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mistral-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mistral-3-2503": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mixtral-8x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llama-3-vision": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mm-grounding-dino-o365v1-goldg-v3det": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "mobilebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "modernbert": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "moonshine": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "hf-moshiko": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "mpnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer"
- }
- }
- },
- "mpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "mra-512-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "mt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "musicgen": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "musicgen-melody": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "mvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "nemotron-3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "nllb-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.nllb.tokenization_nllb.NllbTokenizer"
- }
- }
- },
- "nystromformer-512": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "olmo-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "olmo2-1124-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "olmo-3-0725": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "olmoe-0924": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "omdet-turbo-swin-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "oneformer-ade-swin": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "openai-gpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
- }
- }
- },
- "opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "ovis2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "owlv2-patch16": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "owlvit-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "paligemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "pegasus": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "pegasus-x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "language-perceiver": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.perceiver.tokenization_perceiver.PerceiverTokenizer"
- }
- }
- },
- "persimmon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "phi-1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "phi-3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "phi-3-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "pixtral": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "plbart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.plbart.tokenization_plbart.PLBartTokenizer"
- }
- }
- },
- "phetnet-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.prophetnet.tokenization_prophetnet.ProphetNetTokenizer"
- }
- }
- },
- "qwen2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen2-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen15-moe-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-next-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-vl-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "recurrentgemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "reformer-crime-and-punishment": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.reformer.tokenization_reformer.ReformerTokenizer"
- }
- }
- },
- "rembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.rembert.tokenization_rembert.RemBertTokenizer"
- }
- }
- },
- "roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "efficient-mlm-m0-0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "roc-bert-zh": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roc_bert.tokenization_roc_bert.RoCBertTokenizer"
- }
- }
- },
- "roformer-chinese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roformer.tokenization_roformer.RoFormerTokenizer"
- }
- }
- },
- "rwkv-4-pile": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "hf-seamless-m4t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
- }
- }
- },
- "seamless-m4t-v2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
- }
- }
- },
- "siglip-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.siglip.tokenization_siglip.SiglipTokenizer"
- }
- }
- },
- "siglip2-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "smollm3": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "s2t-librispeech-asr": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer"
- }
- }
- },
- "speecht5-asr": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.speecht5.tokenization_speecht5.SpeechT5Tokenizer"
- }
- }
- },
- "splinter": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.splinter.tokenization_splinter.SplinterTokenizer"
- }
- }
- },
- "squeezebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "stablelm-4e1t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "starcoder2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "switch-8": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "t5gemma-prefixlm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "tapas-finetuned-sqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.tapas.tokenization_tapas.TapasTokenizer"
- }
- }
- },
- "tvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "udop": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.udop.tokenization_udop.UdopTokenizer"
- }
- }
- },
- "umt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "video-llava-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "vilt-b32-mlm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "vip-llava-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "visualbert-vqa-coco-pre": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "mms-tts-eng": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.vits.tokenization_vits.VitsTokenizer"
- }
- }
- },
- "voxtral-2507": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "wav2vec2-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "wav2vec2-bert-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "wav2vec2-conformer-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "whisper": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.whisper.tokenization_whisper.WhisperTokenizer"
- }
- }
- },
- "xclip-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "xglm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xglm.tokenization_xglm.XGLMTokenizer"
- }
- }
- },
- "xlm-mlm-en-2048": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm.tokenization_xlm.XLMTokenizer"
- }
- }
- },
- "xlm-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "xlm-roberta-xl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "xlnet-cased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer"
- }
- }
- },
- "xlstm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "xmod": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "yoso-4096": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "zamba-v1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "zamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- }
- },
- "info.vit.aimv2-patch14-224-lit": {
- "*": {
- "repo": "apple/aimv2-large-patch14-224-lit",
- "pkg": {
- "0": {
- "transformers": "Aimv2Model"
- }
- },
- "tasks": [
- "Aimv2VisionModel",
- "Aimv2Model",
- "Aimv2PreTrainedModel",
- "Aimv2TextModel"
- ]
- }
- },
- "info.vit.aimv2-patch14-224": {
- "*": {
- "repo": "apple/aimv2-large-patch14-224",
- "pkg": {
- "0": {
- "transformers": "Aimv2VisionModel"
- }
- },
- "tasks": [
- "Aimv2VisionModel",
- "Aimv2Model",
- "Aimv2PreTrainedModel",
- "Aimv2TextModel"
- ]
- }
- },
- "info.art.albert-xx-v2": {
- "*": {
- "repo": "albert/albert-xxlarge-v2",
- "pkg": {
- "0": {
- "transformers": "AlbertModel"
- }
- },
- "tasks": [
- "AlbertPreTrainedModel",
- "AlbertModel",
- "AlbertForPreTraining",
- "AlbertForMaskedLM",
- "AlbertForSequenceClassification",
- "AlbertForTokenClassification",
- "AlbertForQuestionAnswering",
- "AlbertForMultipleChoice"
- ]
- }
- },
- "info.vit.align": {
- "*": {
- "repo": "kakaobrain/align-base",
- "pkg": {
- "0": {
- "transformers": "AlignModel"
- }
- },
- "tasks": [
- "AlignPreTrainedModel",
- "AlignTextModel",
- "AlignVisionModel",
- "AlignModel"
- ]
- }
- },
- "info.vit.altclip": {
- "*": {
- "repo": "BAAI/AltCLIP",
- "pkg": {
- "0": {
- "transformers": "AltCLIPModel"
- }
- },
- "tasks": [
- "AltCLIPPreTrainedModel",
- "AltCLIPVisionModel",
- "AltCLIPTextModel",
- "AltCLIPModel"
- ]
- }
- },
- "info.stst.apertus": {
- "*": {
- "repo": "swiss-ai/Apertus-8B",
- "pkg": {
- "0": {
- "transformers": "ApertusModel"
- }
- },
- "tasks": [
- "ApertusModel",
- "ApertusForCausalLM",
- "ApertusForTokenClassification",
- "ApertusPreTrainedModel"
- ]
- }
- },
- "info.stst.afm": {
- "*": {
- "repo": "arcee-ai/AFM-4.5B",
- "pkg": {
- "0": {
- "transformers": "ArceeModel"
- }
- },
- "tasks": [
- "ArceeForCausalLM",
- "ArceeForQuestionAnswering",
- "ArceeForSequenceClassification",
- "ArceeForTokenClassification",
- "ArceeModel",
- "ArceePreTrainedModel"
- ]
- }
- },
- "info.vit.aria": {
- "*": {
- "repo": "rhymes-ai/Aria",
- "pkg": {
- "0": {
- "transformers": "AriaModel"
- }
- },
- "tasks": [
- "AriaForConditionalGeneration",
- "AriaPreTrainedModel",
- "AriaTextPreTrainedModel",
- "AriaTextModel",
- "AriaModel",
- "AriaTextForCausalLM"
- ]
- }
- },
- "info.vit.ast-finetuned-audioset-10-10-0593": {
- "*": {
- "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
- "pkg": {
- "0": {
- "transformers": "ASTModel"
- }
- },
- "tasks": [
- "ASTForAudioClassification",
- "ASTModel",
- "ASTPreTrainedModel"
- ]
- }
- },
- "info.stst.audio-flamingo-3-hf": {
- "*": {
- "repo": "nvidia/audio-flamingo-3-hf",
- "pkg": {
- "0": {
- "transformers": "AudioFlamingo3ForConditionalGeneration"
- }
- },
- "tasks": [
- "AudioFlamingo3ForConditionalGeneration",
- "AudioFlamingo3PreTrainedModel",
- "AudioFlamingo3Encoder"
- ]
- }
- },
- "info.aet.audio-flamingo-3-hf": {
- "*": {
- "repo": "nvidia/audio-flamingo-3-hf",
- "pkg": {
- "0": {
- "transformers": "AudioFlamingo3Encoder"
- }
- },
- "tasks": [
- "AudioFlamingo3ForConditionalGeneration",
- "AudioFlamingo3PreTrainedModel",
- "AudioFlamingo3Encoder"
- ]
- }
- },
- "info.stst.autoformer-tourism-monthly": {
- "*": {
- "repo": "huggingface/autoformer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "AutoformerModel"
- }
- },
- "tasks": [
- "AutoformerForPrediction",
- "AutoformerModel",
- "AutoformerPreTrainedModel"
- ]
- }
- },
- "info.vit.aya-vision": {
- "*": {
- "repo": "CohereForAI/aya-vision-8b",
- "pkg": {
- "0": {
- "transformers": "AyaVisionModel"
- }
- },
- "tasks": [
- "AyaVisionForConditionalGeneration",
- "AyaVisionPreTrainedModel",
- "AyaVisionModel"
- ]
- }
- },
- "info.ssm.bamba-t-hf": {
- "*": {
- "repo": "ibm-fms/Bamba-9.8b-2.2T-hf",
- "pkg": {
- "0": {
- "transformers": "BambaModel"
- }
- },
- "tasks": [
- "BambaModel",
- "BambaForCausalLM",
- "BambaPreTrainedModel"
- ]
- }
- },
- "info.art.bark": {
- "*": {
- "repo": "suno/bark",
- "pkg": {
- "0": {
- "transformers": "BarkModel"
- }
- },
- "tasks": [
- "BarkFineModel",
- "BarkSemanticModel",
- "BarkCoarseModel",
- "BarkModel",
- "BarkPreTrainedModel",
- "BarkCausalModel"
- ]
- }
- },
- "info.stst.bart": {
- "*": {
- "repo": "facebook/bart-large",
- "pkg": {
- "0": {
- "transformers": "BartModel"
- }
- },
- "tasks": [
- "BartForCausalLM",
- "BartForConditionalGeneration",
- "BartForQuestionAnswering",
- "BartForSequenceClassification",
- "BartModel",
- "BartPreTrainedModel",
- "BartPretrainedModel",
- "PretrainedBartModel"
- ]
- }
- },
- "info.vit.beit-patch16-224-pt": {
- "*": {
- "repo": "microsoft/beit-base-patch16-224-pt22k",
- "pkg": {
- "0": {
- "transformers": "BeitModel"
- }
- },
- "tasks": [
- "BeitForImageClassification",
- "BeitForMaskedImageModeling",
- "BeitForSemanticSegmentation",
- "BeitModel",
- "BeitPreTrainedModel",
- "BeitBackbone"
- ]
- }
- },
- "info.art.bert-uncased": {
- "*": {
- "repo": "google-bert/bert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BertModel"
- }
- },
- "file_256": [
- "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
- ],
- "layer_b3": [
- "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
- ],
- "layer_256": [
- "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
- ],
- "tasks": [
- "BertForMaskedLM",
- "BertForMultipleChoice",
- "BertForNextSentencePrediction",
- "BertForPreTraining",
- "BertForQuestionAnswering",
- "BertForSequenceClassification",
- "BertForTokenClassification",
- "BertLayer",
- "BertLMHeadModel",
- "BertModel",
- "BertPreTrainedModel"
- ]
- }
- },
- "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
- "*": {
- "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
- "pkg": {
- "0": {
- "transformers": "BertGenerationEncoder"
- }
- },
- "tasks": [
- "BertGenerationDecoder",
- "BertGenerationEncoder",
- "BertGenerationPreTrainedModel"
- ]
- }
- },
- "info.art.bigbird-roberta": {
- "*": {
- "repo": "google/bigbird-roberta-base",
- "pkg": {
- "0": {
- "transformers": "BigBirdModel"
- }
- },
- "tasks": [
- "BigBirdForCausalLM",
- "BigBirdForMaskedLM",
- "BigBirdForMultipleChoice",
- "BigBirdForPreTraining",
- "BigBirdForQuestionAnswering",
- "BigBirdForSequenceClassification",
- "BigBirdForTokenClassification",
- "BigBirdLayer",
- "BigBirdModel",
- "BigBirdPreTrainedModel"
- ]
- }
- },
- "info.stst.bigbird-pegasus-arxiv": {
- "*": {
- "repo": "google/bigbird-pegasus-large-arxiv",
- "pkg": {
- "0": {
- "transformers": "BigBirdPegasusModel"
- }
- },
- "tasks": [
- "BigBirdPegasusForCausalLM",
- "BigBirdPegasusForConditionalGeneration",
- "BigBirdPegasusForQuestionAnswering",
- "BigBirdPegasusForSequenceClassification",
- "BigBirdPegasusModel",
- "BigBirdPegasusPreTrainedModel"
- ]
- }
- },
- "info.art.biogpt": {
- "*": {
- "repo": "microsoft/biogpt",
- "pkg": {
- "0": {
- "transformers": "BioGptModel"
- }
- },
- "tasks": [
- "BioGptForCausalLM",
- "BioGptForTokenClassification",
- "BioGptForSequenceClassification",
- "BioGptModel",
- "BioGptPreTrainedModel"
- ]
- }
- },
- "info.vit.bit-50": {
- "*": {
- "repo": "google/bit-50",
- "pkg": {
- "0": {
- "transformers": "BitModel"
- }
- },
- "tasks": [
- "BitForImageClassification",
- "BitModel",
- "BitPreTrainedModel",
- "BitBackbone"
- ]
- }
- },
- "info.stst.bitnet-b18-4t": {
- "*": {
- "repo": "microsoft/bitnet-b1.58-2B-4T",
- "pkg": {
- "0": {
- "transformers": "BitNetModel"
- }
- },
- "tasks": [
- "BitNetForCausalLM",
- "BitNetModel",
- "BitNetPreTrainedModel"
- ]
- }
- },
- "info.stst.blenderbot": {
- "*": {
- "repo": "facebook/blenderbot-3B",
- "pkg": {
- "0": {
- "transformers": "BlenderbotModel"
- }
- },
- "tasks": [
- "BlenderbotForCausalLM",
- "BlenderbotForConditionalGeneration",
- "BlenderbotModel",
- "BlenderbotPreTrainedModel"
- ]
- }
- },
- "info.vit.blip-vqa": {
- "*": {
- "repo": "Salesforce/blip-vqa-base",
- "pkg": {
- "0": {
- "transformers": "BlipModel"
- }
- },
- "tasks": [
- "BlipModel",
- "BlipPreTrainedModel",
- "BlipForConditionalGeneration",
- "BlipForQuestionAnswering",
- "BlipVisionModel",
- "BlipTextModel",
- "BlipForImageTextRetrieval"
- ]
- }
- },
- "info.vit.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2Model"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.stst.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2QFormerModel"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.art.bloom": {
- "*": {
- "repo": "bigscience/bloom",
- "pkg": {
- "0": {
- "transformers": "BloomModel"
- }
- },
- "tasks": [
- "BloomForCausalLM",
- "BloomModel",
- "BloomPreTrainedModel",
- "BloomForSequenceClassification",
- "BloomForTokenClassification",
- "BloomForQuestionAnswering"
- ]
- }
- },
- "info.vit.blt": {
- "*": {
- "repo": "facebook/blt",
- "pkg": {
- "0": {
- "transformers": "BltModel"
- }
- },
- "tasks": [
- "BltPreTrainedModel",
- "BltModel",
- "BltPatcher",
- "BltForCausalLM"
- ]
- }
- },
- "info.vit.bridgetower": {
- "*": {
- "repo": "BridgeTower/bridgetower-base",
- "pkg": {
- "0": {
- "transformers": "BridgeTowerModel"
- }
- },
- "tasks": [
- "BridgeTowerForContrastiveLearning",
- "BridgeTowerForImageAndTextRetrieval",
- "BridgeTowerForMaskedLM",
- "BridgeTowerModel",
- "BridgeTowerPreTrainedModel"
- ]
- }
- },
- "info.art.bros-uncased": {
- "*": {
- "repo": "jinho8345/bros-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BrosModel"
- }
- },
- "tasks": [
- "BrosPreTrainedModel",
- "BrosModel",
- "BrosForTokenClassification",
- "BrosSpadeEEForTokenClassification",
- "BrosSpadeELForTokenClassification"
- ]
- }
- },
- "info.art.camembert": {
- "*": {
- "repo": "almanach/camembert-base",
- "pkg": {
- "0": {
- "transformers": "CamembertModel"
- }
- },
- "tasks": [
- "CamembertForCausalLM",
- "CamembertForMaskedLM",
- "CamembertForMultipleChoice",
- "CamembertForQuestionAnswering",
- "CamembertForSequenceClassification",
- "CamembertForTokenClassification",
- "CamembertModel",
- "CamembertPreTrainedModel"
- ]
- }
- },
- "info.art.canine-s": {
- "*": {
- "repo": "google/canine-s",
- "pkg": {
- "0": {
- "transformers": "CanineModel"
- }
- },
- "tasks": [
- "CanineForMultipleChoice",
- "CanineForQuestionAnswering",
- "CanineForSequenceClassification",
- "CanineForTokenClassification",
- "CanineLayer",
- "CanineModel",
- "CaninePreTrainedModel"
- ]
- }
- },
- "info.stst.chameleon": {
- "*": {
- "repo": "meta/chameleon-7B",
- "pkg": {
- "0": {
- "transformers": "ChameleonModel"
- }
- },
- "tasks": [
- "ChameleonForConditionalGeneration",
- "ChameleonModel",
- "ChameleonPreTrainedModel",
- "ChameleonVQVAE"
- ]
- }
- },
- "info.vit.chinese-clip-vit-patch16": {
- "*": {
- "repo": "OFA-Sys/chinese-clip-vit-base-patch16",
- "pkg": {
- "0": {
- "transformers": "ChineseCLIPModel"
- }
- },
- "tasks": [
- "ChineseCLIPModel",
- "ChineseCLIPPreTrainedModel",
- "ChineseCLIPTextModel",
- "ChineseCLIPVisionModel"
- ]
- }
- },
- "info.vit.clap-htsat-fused": {
- "*": {
- "repo": "laion/clap-htsat-fused",
- "pkg": {
- "0": {
- "transformers": "ClapModel"
- }
- },
- "file_256": [
- "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
- "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
- ],
- "layer_b3": [
- "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
- "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
- ],
- "layer_256": [
- "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
- "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
- ],
- "tasks": [
- "ClapModel",
- "ClapPreTrainedModel",
- "ClapTextModel",
- "ClapTextModelWithProjection",
- "ClapAudioModel",
- "ClapAudioModelWithProjection"
- ]
- }
- },
- "info.vit.clip-vit-patch32": {
- "*": {
- "repo": "openai/clip-vit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "CLIPModel"
- }
- },
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clipseg-rd64": {
- "*": {
- "repo": "CIDAS/clipseg-rd64",
- "pkg": {
- "0": {
- "transformers": "CLIPSegModel"
- }
- },
- "tasks": [
- "CLIPSegModel",
- "CLIPSegPreTrainedModel",
- "CLIPSegTextModel",
- "CLIPSegVisionModel",
- "CLIPSegForImageSegmentation"
- ]
- }
- },
- "info.vit.clvp-dev": {
- "*": {
- "repo": "susnato/clvp_dev",
- "pkg": {
- "0": {
- "transformers": "ClvpModelForConditionalGeneration"
- }
- },
- "tasks": [
- "ClvpModelForConditionalGeneration",
- "ClvpForCausalLM",
- "ClvpModel",
- "ClvpPreTrainedModel",
- "ClvpEncoder",
- "ClvpDecoder"
- ]
- }
- },
- "info.stst.llama-2-hf": {
- "*": {
- "repo": "meta-llama/Llama-2-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlamaModel"
- }
- },
- "tasks": [
- "LlamaForCausalLM",
- "LlamaModel",
- "LlamaPreTrainedModel",
- "LlamaForSequenceClassification",
- "LlamaForQuestionAnswering",
- "LlamaForTokenClassification"
- ]
- }
- },
- "info.art.codegen-mono": {
- "*": {
- "repo": "Salesforce/codegen-2B-mono",
- "pkg": {
- "0": {
- "transformers": "CodeGenModel"
- }
- },
- "tasks": [
- "CodeGenForCausalLM",
- "CodeGenModel",
- "CodeGenPreTrainedModel"
- ]
- }
- },
- "info.stst.c4ai-command-r-v01": {
- "*": {
- "repo": "CohereForAI/c4ai-command-r-v01",
- "pkg": {
- "0": {
- "transformers": "CohereModel"
- }
- },
- "tasks": [
- "CohereForCausalLM",
- "CohereModel",
- "CoherePreTrainedModel"
- ]
- }
- },
- "info.vit.command-a-vision-07-2025": {
- "*": {
- "repo": "CohereLabs/command-a-vision-07-2025",
- "pkg": {
- "0": {
- "transformers": "Cohere2VisionModel"
- }
- },
- "tasks": [
- "Cohere2VisionForConditionalGeneration",
- "Cohere2VisionPreTrainedModel",
- "Cohere2VisionModel"
- ]
- }
- },
- "info.detr.conditional-detr-resnet-50": {
- "*": {
- "repo": "microsoft/conditional-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "ConditionalDetrModel"
- }
- },
- "tasks": [
- "ConditionalDetrForObjectDetection",
- "ConditionalDetrForSegmentation",
- "ConditionalDetrModel",
- "ConditionalDetrPreTrainedModel"
- ]
- }
- },
- "info.art.conv-bert": {
- "*": {
- "repo": "YituTech/conv-bert-base",
- "pkg": {
- "0": {
- "transformers": "ConvBertModel"
- }
- },
- "tasks": [
- "ConvBertForMaskedLM",
- "ConvBertForMultipleChoice",
- "ConvBertForQuestionAnswering",
- "ConvBertForSequenceClassification",
- "ConvBertForTokenClassification",
- "ConvBertLayer",
- "ConvBertModel",
- "ConvBertPreTrainedModel"
- ]
- }
- },
- "info.vit.convnext-224": {
- "*": {
- "repo": "facebook/convnext-tiny-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextModel"
- }
- },
- "tasks": [
- "ConvNextForImageClassification",
- "ConvNextModel",
- "ConvNextPreTrainedModel",
- "ConvNextBackbone"
- ]
- }
- },
- "info.vit.convnextv2-224": {
- "*": {
- "repo": "facebook/convnextv2-tiny-1k-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextV2Model"
- }
- },
- "tasks": [
- "ConvNextV2ForImageClassification",
- "ConvNextV2Model",
- "ConvNextV2PreTrainedModel",
- "ConvNextV2Backbone"
- ]
- }
- },
- "info.stst.cpm-ant": {
- "*": {
- "repo": "openbmb/cpm-ant-10b",
- "pkg": {
- "0": {
- "transformers": "CpmAntModel"
- }
- },
- "tasks": [
- "CpmAntForCausalLM",
- "CpmAntModel",
- "CpmAntPreTrainedModel"
- ]
- }
- },
- "info.stst.csm": {
- "*": {
- "repo": "sesame/csm-1b",
- "pkg": {
- "0": {
- "transformers": "CsmForConditionalGeneration"
- }
- },
- "tasks": [
- "CsmPreTrainedModel",
- "CsmBackboneModel",
- "CsmDepthDecoderModel",
- "CsmDepthDecoderForCausalLM",
- "CsmForConditionalGeneration"
- ]
- }
- },
- "info.art.ctrl": {
- "*": {
- "repo": "Salesforce/ctrl",
- "pkg": {
- "0": {
- "transformers": "CTRLModel"
- }
- },
- "tasks": [
- "CTRLForSequenceClassification",
- "CTRLLMHeadModel",
- "CTRLModel",
- "CTRLPreTrainedModel"
- ]
- }
- },
- "info.vit.cvt-13": {
- "*": {
- "repo": "microsoft/cvt-13",
- "pkg": {
- "0": {
- "transformers": "CvtModel"
- }
- },
- "tasks": [
- "CvtForImageClassification",
- "CvtModel",
- "CvtPreTrainedModel"
- ]
- }
- },
- "info.art.cwm": {
- "*": {
- "repo": "facebook/cwm",
- "pkg": {
- "0": {
- "transformers": "CwmModel"
- }
- },
- "tasks": [
- "CwmPreTrainedModel",
- "CwmModel",
- "CwmForCausalLM"
- ]
- }
- },
- "info.detr.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine-xlarge-coco",
- "pkg": {
- "0": {
- "transformers": "DFineModel"
- }
- },
- "tasks": [
- "DFineModel",
- "DFinePreTrainedModel",
- "DFineForObjectDetection"
- ]
- }
- },
- "info.detr.dab-detr": {
- "*": {
- "repo": "IDEA-Research/dab-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DabDetrModel"
- }
- },
- "tasks": [
- "DabDetrForObjectDetection",
- "DabDetrModel",
- "DabDetrPreTrainedModel"
- ]
- }
- },
- "info.gan.dac": {
- "*": {
- "repo": "descript/dac_16khz",
- "pkg": {
- "0": {
- "transformers": "DacModel"
- }
- },
- "tasks": [
- "DacModel",
- "DacPreTrainedModel"
- ]
- }
- },
- "info.aet.data2vec-audio-960h": {
- "*": {
- "repo": "facebook/data2vec-audio-base-960h",
- "pkg": {
- "0": {
- "transformers": "Data2VecAudioModel"
- }
- },
- "tasks": [
- "Data2VecAudioForAudioFrameClassification",
- "Data2VecAudioForCTC",
- "Data2VecAudioForSequenceClassification",
- "Data2VecAudioForXVector",
- "Data2VecAudioModel",
- "Data2VecAudioPreTrainedModel"
- ]
- }
- },
- "info.art.data2vec-text": {
- "*": {
- "repo": "facebook/data2vec-text-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecTextModel"
- }
- },
- "tasks": [
- "Data2VecTextForCausalLM",
- "Data2VecTextForMaskedLM",
- "Data2VecTextForMultipleChoice",
- "Data2VecTextForQuestionAnswering",
- "Data2VecTextForSequenceClassification",
- "Data2VecTextForTokenClassification",
- "Data2VecTextModel",
- "Data2VecTextPreTrainedModel"
- ]
- }
- },
- "info.vit.data2vec-vision": {
- "*": {
- "repo": "facebook/data2vec-vision-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecVisionModel"
- }
- },
- "tasks": [
- "Data2VecVisionForImageClassification",
- "Data2VecVisionForSemanticSegmentation",
- "Data2VecVisionModel",
- "Data2VecVisionPreTrainedModel"
- ]
- }
- },
- "info.stst.dbrx": {
- "*": {
- "repo": "databricks/dbrx-instruct",
- "pkg": {
- "0": {
- "transformers": "DbrxModel"
- }
- },
- "tasks": [
- "DbrxForCausalLM",
- "DbrxModel",
- "DbrxPreTrainedModel"
- ]
- }
- },
- "info.art.deberta": {
- "*": {
- "repo": "microsoft/deberta-base",
- "pkg": {
- "0": {
- "transformers": "DebertaModel"
- }
- },
- "tasks": [
- "DebertaForMaskedLM",
- "DebertaForQuestionAnswering",
- "DebertaForSequenceClassification",
- "DebertaForTokenClassification",
- "DebertaModel",
- "DebertaPreTrainedModel"
- ]
- }
- },
- "info.art.deberta-v2-x": {
- "*": {
- "repo": "microsoft/deberta-v2-xlarge",
- "pkg": {
- "0": {
- "transformers": "DebertaV2Model"
- }
- },
- "tasks": [
- "DebertaV2ForMaskedLM",
- "DebertaV2ForMultipleChoice",
- "DebertaV2ForQuestionAnswering",
- "DebertaV2ForSequenceClassification",
- "DebertaV2ForTokenClassification",
- "DebertaV2Model",
- "DebertaV2PreTrainedModel"
- ]
- }
- },
- "info.art.decision-transformer-gym-hopper": {
- "*": {
- "repo": "edbeeching/decision-transformer-gym-hopper-medium",
- "pkg": {
- "0": {
- "transformers": "DecisionTransformerModel"
- }
- },
- "tasks": [
- "DecisionTransformerGPT2Model",
- "DecisionTransformerGPT2PreTrainedModel",
- "DecisionTransformerModel",
- "DecisionTransformerPreTrainedModel"
- ]
- }
- },
- "info.moe.deepseek-v2-lite": {
- "*": {
- "repo": "deepseek-ai/DeepSeek-V2-Lite",
- "pkg": {
- "0": {
- "transformers": "DeepseekV2Model"
- }
- },
- "tasks": [
- "DeepseekV2PreTrainedModel",
- "DeepseekV2Model",
- "DeepseekV2ForCausalLM",
- "DeepseekV2ForSequenceClassification"
- ]
- }
- },
- "info.moe.deepseek-v3": {
- "*": {
- "repo": "bzantium/tiny-deepseek-v3",
- "pkg": {
- "0": {
- "transformers": "DeepseekV3Model"
- }
- },
- "tasks": [
- "DeepseekV3PreTrainedModel",
- "DeepseekV3Model",
- "DeepseekV3ForCausalLM",
- "DeepseekV3ForSequenceClassification",
- "DeepseekV3ForTokenClassification"
- ]
- }
- },
- "info.vit.deepseek-vl-chat": {
- "*": {
- "repo": "deepseek-community/deepseek-vl-1.3b-chat",
- "pkg": {
- "0": {
- "transformers": "DeepseekVLModel"
- }
- },
- "tasks": [
- "DeepseekVLPreTrainedModel",
- "DeepseekVLModel",
- "DeepseekVLForConditionalGeneration"
- ]
- }
- },
- "info.detr.deformable-detr": {
- "*": {
- "repo": "SenseTime/deformable-detr",
- "pkg": {
- "0": {
- "transformers": "DeformableDetrModel"
- }
- },
- "tasks": [
- "DeformableDetrForObjectDetection",
- "DeformableDetrModel",
- "DeformableDetrPreTrainedModel"
- ]
- }
- },
- "info.vit.deit-distilled-patch16-224": {
- "*": {
- "repo": "facebook/deit-base-distilled-patch16-224",
- "pkg": {
- "0": {
- "transformers": "DeiTModel"
- }
- },
- "tasks": [
- "DeiTForImageClassification",
- "DeiTForImageClassificationWithTeacher",
- "DeiTForMaskedImageModeling",
- "DeiTModel",
- "DeiTPreTrainedModel"
- ]
- }
- },
- "info.vit.depth": {
- "*": {
- "repo": "apple/DepthPro",
- "pkg": {
- "0": {
- "transformers": "DepthProModel"
- }
- },
- "tasks": [
- "DepthProPreTrainedModel",
- "DepthProModel",
- "DepthProForDepthEstimation"
- ]
- }
- },
- "info.detr.detr-resnet-50": {
- "*": {
- "repo": "facebook/detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DetrModel"
- }
- },
- "tasks": [
- "DetrForObjectDetection",
- "DetrForSegmentation",
- "DetrModel",
- "DetrPreTrainedModel"
- ]
- }
- },
- "info.stst.dia": {
- "*": {
- "repo": "nari-labs/Dia-1.6B",
- "pkg": {
- "0": {
- "transformers": "DiaModel"
- }
- },
- "tasks": [
- "DiaModel",
- "DiaPreTrainedModel",
- "DiaForConditionalGeneration"
- ]
- }
- },
- "info.stst.diffllama-handcut": {
- "*": {
- "repo": "kajuma/DiffLlama-0.3B-handcut",
- "pkg": {
- "0": {
- "transformers": "DiffLlamaModel"
- }
- },
- "tasks": [
- "DiffLlamaPreTrainedModel",
- "DiffLlamaModel",
- "DiffLlamaForCausalLM",
- "DiffLlamaForSequenceClassification",
- "DiffLlamaForQuestionAnswering",
- "DiffLlamaForTokenClassification"
- ]
- }
- },
- "info.gan.dinat-in-224": {
- "*": {
- "repo": "shi-labs/dinat-mini-in1k-224",
- "pkg": {
- "0": {
- "transformers": "DinatModel"
- }
- },
- "tasks": [
- "DinatForImageClassification",
- "DinatModel",
- "DinatPreTrainedModel",
- "DinatBackbone"
- ]
- }
- },
- "info.vit.dinov2-patch16-224": {
- "*": {
- "repo": "google/dinov2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Dinov2Model"
- }
- },
- "tasks": [
- "Dinov2ForImageClassification",
- "Dinov2Model",
- "Dinov2PreTrainedModel",
- "Dinov2Backbone"
- ]
- }
- },
- "info.vit.dinov2-with-registers": {
- "*": {
- "repo": "facebook/dinov2-with-registers-base",
- "pkg": {
- "0": {
- "transformers": "Dinov2WithRegistersModel"
- }
- },
- "tasks": [
- "Dinov2WithRegistersPreTrainedModel",
- "Dinov2WithRegistersModel",
- "Dinov2WithRegistersForImageClassification",
- "Dinov2WithRegistersBackbone"
- ]
- }
- },
- "info.vit.dinov3-convnext-pretrain-lvd": {
- "*": {
- "repo": "facebook/dinov3-convnext-tiny-pretrain-lvd1689m",
- "pkg": {
- "0": {
- "transformers": "DINOv3ConvNextModel"
- }
- },
- "tasks": [
- "DINOv3ConvNextModel",
- "DINOv3ConvNextPreTrainedModel",
- "DINOv3ConvNextBackbone"
- ]
- }
- },
- "info.vit.dinov3-vits16-pretrain-lvd": {
- "*": {
- "repo": "facebook/dinov3-vits16-pretrain-lvd1689m",
- "pkg": {
- "0": {
- "transformers": "DINOv3ViTModel"
- }
- },
- "tasks": [
- "DINOv3ViTModel",
- "DINOv3ViTPreTrainedModel",
- "DINOv3ViTBackbone"
- ]
- }
- },
- "info.art.distilbert-uncased": {
- "*": {
- "repo": "distilbert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "DistilBertModel"
- }
- },
- "tasks": [
- "DistilBertForMaskedLM",
- "DistilBertForMultipleChoice",
- "DistilBertForQuestionAnswering",
- "DistilBertForSequenceClassification",
- "DistilBertForTokenClassification",
- "DistilBertModel",
- "DistilBertPreTrainedModel"
- ]
- }
- },
- "info.moe.doge": {
- "*": {
- "repo": "SmallDoge/Doge-320M",
- "pkg": {
- "0": {
- "transformers": "DogeModel"
- }
- },
- "tasks": [
- "DogeForCausalLM",
- "DogeModel",
- "DogePreTrainedModel",
- "DogeForSequenceClassification"
- ]
- }
- },
- "info.vit.donut": {
- "*": {
- "repo": "naver-clova-ix/donut-base",
- "pkg": {
- "0": {
- "transformers": "DonutSwinModel"
- }
- },
- "tasks": [
- "DonutSwinModel",
- "DonutSwinPreTrainedModel",
- "DonutSwinForImageClassification"
- ]
- }
- },
- "info.moe.dots-llm1": {
- "*": {
- "repo": "rednote-hilab/dots.llm1.base",
- "pkg": {
- "0": {
- "transformers": "Dots1Model"
- }
- },
- "tasks": [
- "Dots1PreTrainedModel",
- "Dots1Model",
- "Dots1ForCausalLM"
- ]
- }
- },
- "info.vit.dpr-ctx-encoder-single-nq": {
- "*": {
- "repo": "facebook/dpr-ctx_encoder-single-nq-base",
- "pkg": {
- "0": {
- "transformers": "DPRQuestionEncoder"
- }
- },
- "tasks": [
- "DPRContextEncoder",
- "DPRPretrainedContextEncoder",
- "DPRPreTrainedModel",
- "DPRPretrainedQuestionEncoder",
- "DPRPretrainedReader",
- "DPRQuestionEncoder",
- "DPRReader"
- ]
- }
- },
- "info.detr.dpt": {
- "*": {
- "repo": "Intel/dpt-large",
- "pkg": {
- "0": {
- "transformers": "DPTModel"
- }
- },
- "tasks": [
- "DPTForDepthEstimation",
- "DPTForSemanticSegmentation",
- "DPTModel",
- "DPTPreTrainedModel"
- ]
- }
- },
- "info.vit.edgetam1-hiera": {
- "*": {
- "repo": "facebook/edgetam.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "EdgeTamModel"
- }
- },
- "tasks": [
- "EdgeTamModel",
- "EdgeTamVisionModel",
- "EdgeTamPreTrainedModel"
- ]
- }
- },
- "info.vit.edgetam": {
- "*": {
- "repo": "facebook/EdgeTAM",
- "pkg": {
- "0": {
- "transformers": "EdgeTamVideoModel"
- }
- },
- "tasks": [
- "EdgeTamVideoModel",
- "EdgeTamVideoInferenceSession",
- "EdgeTamVideoPreTrainedModel"
- ]
- }
- },
- "info.vit.efficientloftr": {
- "*": {
- "repo": "zju-community/efficientloftr",
- "pkg": {
- "0": {
- "transformers": "EfficientLoFTRModel"
- }
- },
- "tasks": [
- "EfficientLoFTRPreTrainedModel",
- "EfficientLoFTRModel",
- "EfficientLoFTRForKeypointMatching"
- ]
- }
- },
- "info.vit.efficientnet-b7": {
- "*": {
- "repo": "google/efficientnet-b7",
- "pkg": {
- "0": {
- "transformers": "EfficientNetModel"
- }
- },
- "tasks": [
- "EfficientNetForImageClassification",
- "EfficientNetModel",
- "EfficientNetPreTrainedModel"
- ]
- }
- },
- "info.art.electra-discriminator": {
- "*": {
- "repo": "google/electra-small-discriminator",
- "pkg": {
- "0": {
- "transformers": "ElectraModel"
- }
- },
- "tasks": [
- "ElectraForCausalLM",
- "ElectraForMaskedLM",
- "ElectraForMultipleChoice",
- "ElectraForPreTraining",
- "ElectraForQuestionAnswering",
- "ElectraForSequenceClassification",
- "ElectraForTokenClassification",
- "ElectraModel",
- "ElectraPreTrainedModel"
- ]
- }
- },
- "info.art.emu3-chat-hf": {
- "*": {
- "repo": "Emu3-community/Emu3-Chat-hf",
- "pkg": {
- "0": {
- "transformers": "Emu3Model"
- }
- },
- "tasks": [
- "Emu3ForConditionalGeneration",
- "Emu3ForCausalLM",
- "Emu3TextModel",
- "Emu3PreTrainedModel",
- "Emu3VQVAE",
- "Emu3Model"
- ]
- }
- },
- "info.gan.encodec": {
- "*": {
- "repo": "facebook/encodec_24khz",
- "pkg": {
- "0": {
- "transformers": "EncodecModel"
- }
- },
- "tasks": [
- "EncodecModel",
- "EncodecPreTrainedModel"
- ]
- }
- },
- "info.art.ernie-3-zh": {
- "*": {
- "repo": "nghuyong/ernie-3.0-base-zh",
- "pkg": {
- "0": {
- "transformers": "ErnieModel"
- }
- },
- "tasks": [
- "ErnieForCausalLM",
- "ErnieForMaskedLM",
- "ErnieForMultipleChoice",
- "ErnieForNextSentencePrediction",
- "ErnieForPreTraining",
- "ErnieForQuestionAnswering",
- "ErnieForSequenceClassification",
- "ErnieForTokenClassification",
- "ErnieModel",
- "ErniePreTrainedModel"
- ]
- }
- },
- "info.stst.ernie-45-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-0.3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5Model"
- }
- },
- "tasks": [
- "Ernie4_5ForCausalLM",
- "Ernie4_5Model",
- "Ernie4_5PreTrainedModel"
- ]
- }
- },
- "info.moe.ernie-4-a-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-21B-A3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5_MoeModel"
- }
- },
- "tasks": [
- "Ernie4_5_MoeForCausalLM",
- "Ernie4_5_MoeModel",
- "Ernie4_5_MoePreTrainedModel"
- ]
- }
- },
- "info.vit.ernie-4-vl-a-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-VL-28B-A3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5_VL_MoeModel"
- }
- },
- "tasks": [
- "Ernie4_5_VL_MoePreTrainedModel",
- "Ernie4_5_VL_MoeForConditionalGeneration",
- "Ernie4_5_VL_MoeModel",
- "Ernie4_5_VL_MoeTextModel",
- "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
- "Ernie4_5_VL_MoeVariableResolutionResamplerModel"
- ]
- }
- },
- "info.aet.esm": {
- "*": {
- "repo": "facebook/esm-1b",
- "pkg": {
- "0": {
- "transformers": "EsmModel"
- }
- },
- "tasks": [
- "EsmForMaskedLM",
- "EsmForSequenceClassification",
- "EsmForTokenClassification",
- "EsmModel",
- "EsmPreTrainedModel"
- ]
- }
- },
- "info.stst.evolla-hf": {
- "*": {
- "repo": "westlake-repl/Evolla-10B-hf",
- "pkg": {
- "0": {
- "transformers": "EvollaModel"
- }
- },
- "tasks": [
- "EvollaForProteinText2Text",
- "EvollaModel",
- "EvollaPreTrainedModel"
- ]
- }
- },
- "info.stst.exaone-4": {
- "*": {
- "repo": "LGAI-EXAONE/EXAONE-4.0-32B",
- "pkg": {
- "0": {
- "transformers": "Exaone4Model"
- }
- },
- "tasks": [
- "Exaone4PreTrainedModel",
- "Exaone4Model",
- "Exaone4ForCausalLM",
- "Exaone4ForSequenceClassification",
- "Exaone4ForTokenClassification",
- "Exaone4ForQuestionAnswering"
- ]
- }
- },
- "info.ssm.falcon": {
- "*": {
- "repo": "tiiuae/falcon-7b",
- "pkg": {
- "0": {
- "transformers": "FalconModel"
- }
- },
- "tasks": [
- "FalconForCausalLM",
- "FalconModel",
- "FalconPreTrainedModel",
- "FalconForSequenceClassification",
- "FalconForTokenClassification",
- "FalconForQuestionAnswering"
- ]
- }
- },
- "info.ssm.falconh1-t-hf": {
- "*": {
- "repo": "tiiuae/Falcon-H1-34B-Instruct",
- "pkg": {
- "0": {
- "transformers": "FalconH1Model"
- }
- },
- "tasks": [
- "FalconH1Model",
- "FalconH1ForCausalLM",
- "FalconH1PreTrainedModel"
- ]
- }
- },
- "info.ssm.falcon-mamba": {
- "*": {
- "repo": "tiiuae/falcon-mamba-7b",
- "pkg": {
- "0": {
- "transformers": "FalconMambaModel"
- }
- },
- "tasks": [
- "FalconMambaForCausalLM",
- "FalconMambaModel",
- "FalconMambaPreTrainedModel",
- "FalconMambaCache"
- ]
- }
- },
- "info.vit.fastvlm": {
- "*": {
- "repo": "KamilaMila/FastVLM-7B",
- "pkg": {
- "0": {
- "transformers": "FastVlmModel"
- }
- },
- "tasks": [
- "FastVlmForConditionalGeneration",
- "FastVlmModel",
- "FastVlmPreTrainedModel"
- ]
- }
- },
- "info.aet.fastspeech2-conformer": {
- "*": {
- "repo": "espnet/fastspeech2_conformer",
- "pkg": {
- "0": {
- "transformers": "FastSpeech2ConformerModel"
- }
- },
- "tasks": [
- "FastSpeech2ConformerWithHifiGan",
- "FastSpeech2ConformerHifiGan",
- "FastSpeech2ConformerModel",
- "FastSpeech2ConformerPreTrainedModel"
- ]
- }
- },
- "info.stst.fastspeech2-conformer": {
- "*": {
- "repo": "espnet/fastspeech2_conformer",
- "pkg": {
- "0": {
- "transformers": "FastSpeech2ConformerWithHifiGan"
- }
- },
- "tasks": [
- "FastSpeech2ConformerWithHifiGan",
- "FastSpeech2ConformerHifiGan",
- "FastSpeech2ConformerModel",
- "FastSpeech2ConformerPreTrainedModel"
- ]
- }
- },
- "info.art.flaubert-uncased": {
- "*": {
- "repo": "flaubert/flaubert_base_uncased",
- "pkg": {
- "0": {
- "transformers": "FlaubertModel"
- }
- },
- "tasks": [
- "FlaubertForMultipleChoice",
- "FlaubertForQuestionAnswering",
- "FlaubertForQuestionAnsweringSimple",
- "FlaubertForSequenceClassification",
- "FlaubertForTokenClassification",
- "FlaubertModel",
- "FlaubertWithLMHeadModel",
- "FlaubertPreTrainedModel"
- ]
- }
- },
- "info.vit.flava": {
- "*": {
- "repo": "facebook/flava-full",
- "pkg": {
- "0": {
- "transformers": "FlavaModel"
- }
- },
- "tasks": [
- "FlavaForPreTraining",
- "FlavaImageCodebook",
- "FlavaImageModel",
- "FlavaModel",
- "FlavaMultimodalModel",
- "FlavaPreTrainedModel",
- "FlavaTextModel"
- ]
- }
- },
- "info.moe.flexolmo-7x-1t": {
- "*": {
- "repo": "allenai/FlexOlmo-7x7B-1T",
- "pkg": {
- "0": {
- "transformers": "FlexOlmoModel"
- }
- },
- "tasks": [
- "FlexOlmoForCausalLM",
- "FlexOlmoModel",
- "FlexOlmoPreTrainedModel"
- ]
- }
- },
- "info.vit.florence-2": {
- "*": {
- "repo": "florence-community/Florence-2-base",
- "pkg": {
- "0": {
- "transformers": "Florence2Model"
- }
- },
- "tasks": [
- "Florence2Model",
- "Florence2ForConditionalGeneration",
- "Florence2PreTrainedModel",
- "Florence2VisionBackbone",
- "Florence2VisionPreTrainedModel"
- ]
- }
- },
- "info.art.fnet": {
- "*": {
- "repo": "google/fnet-base",
- "pkg": {
- "0": {
- "transformers": "FNetModel"
- }
- },
- "tasks": [
- "FNetForMaskedLM",
- "FNetForMultipleChoice",
- "FNetForNextSentencePrediction",
- "FNetForPreTraining",
- "FNetForQuestionAnswering",
- "FNetForSequenceClassification",
- "FNetForTokenClassification",
- "FNetLayer",
- "FNetModel",
- "FNetPreTrainedModel"
- ]
- }
- },
- "info.vit.focalnet": {
- "*": {
- "repo": "microsoft/focalnet-tiny",
- "pkg": {
- "0": {
- "transformers": "FocalNetModel"
- }
- },
- "tasks": [
- "FocalNetForImageClassification",
- "FocalNetForMaskedImageModeling",
- "FocalNetBackbone",
- "FocalNetModel",
- "FocalNetPreTrainedModel"
- ]
- }
- },
- "info.stst.wmt19-en-ru": {
- "*": {
- "repo": "facebook/wmt19-en-ru",
- "pkg": {
- "0": {
- "transformers": "FSMTModel"
- }
- },
- "tasks": [
- "FSMTForConditionalGeneration",
- "FSMTModel",
- "PretrainedFSMTModel"
- ]
- }
- },
- "info.aet.funnel": {
- "*": {
- "repo": "funnel-transformer/small",
- "pkg": {
- "0": {
- "transformers": "FunnelModel"
- }
- },
- "tasks": [
- "FunnelBaseModel",
- "FunnelForMaskedLM",
- "FunnelForMultipleChoice",
- "FunnelForPreTraining",
- "FunnelForQuestionAnswering",
- "FunnelForSequenceClassification",
- "FunnelForTokenClassification",
- "FunnelModel",
- "FunnelPreTrainedModel"
- ]
- }
- },
- "info.vit.fuyu": {
- "*": {
- "repo": "adept/fuyu-8b",
- "pkg": {
- "0": {
- "transformers": "FuyuModel"
- }
- },
- "tasks": [
- "FuyuForCausalLM",
- "FuyuPreTrainedModel",
- "FuyuModel"
- ]
- }
- },
- "info.stst.gemma": {
- "*": {
- "repo": "google/gemma-7b",
- "pkg": {
- "0": {
- "transformers": "GemmaModel"
- }
- },
- "file_256": [
- "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
- ],
- "layer_b3": [
- "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
- "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
- ],
- "layer_256": [
- "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
- "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
- ],
- "tasks": [
- "GemmaModel",
- "GemmaForCausalLM",
- "GemmaForSequenceClassification",
- "GemmaForTokenClassification",
- "GemmaPreTrainedModel"
- ]
- }
- },
- "info.stst.gemma2": {
- "*": {
- "repo": "google/gemma-2-9b",
- "pkg": {
- "0": {
- "transformers": "Gemma2Model"
- }
- },
- "file_256": [
- "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
- "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
- ],
- "layer_b3": [
- "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
- "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
- ],
- "layer_256": [
- "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
- "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
- ],
- "tasks": [
- "Gemma2ForCausalLM",
- "Gemma2Model",
- "Gemma2PreTrainedModel",
- "Gemma2ForSequenceClassification",
- "Gemma2ForTokenClassification"
- ]
- }
- },
- "info.vit.gemma-3": {
- "*": {
- "repo": "google/gemma-3-4b-it",
- "pkg": {
- "0": {
- "transformers": "Gemma3Model"
- }
- },
- "tasks": [
- "Gemma3PreTrainedModel",
- "Gemma3TextModel",
- "Gemma3ForCausalLM",
- "Gemma3ForConditionalGeneration",
- "Gemma3Model",
- "Gemma3ForSequenceClassification",
- "Gemma3TextForSequenceClassification"
- ]
- }
- },
- "info.stst.gemma3-text": {
- "*": {
- "repo": "google/gemma-3-12b-it",
- "pkg": {
- "0": {
- "transformers": "Gemma3TextModel"
- }
- },
- "tasks": [
- "Gemma3PreTrainedModel",
- "Gemma3TextModel",
- "Gemma3ForCausalLM",
- "Gemma3ForConditionalGeneration",
- "Gemma3Model",
- "Gemma3ForSequenceClassification",
- "Gemma3TextForSequenceClassification"
- ]
- }
- },
- "info.vit.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nModel"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.art.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nAudioEncoder"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.stst.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nTextModel"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.vit.git": {
- "*": {
- "repo": "microsoft/git-base",
- "pkg": {
- "0": {
- "transformers": "GitModel"
- }
- },
- "tasks": [
- "GitForCausalLM",
- "GitModel",
- "GitPreTrainedModel",
- "GitVisionModel"
- ]
- }
- },
- "info.stst.glm-4-chat": {
- "*": {
- "repo": "zai-org/glm-4-9b-chat",
- "pkg": {
- "0": {
- "transformers": "GlmModel"
- }
- },
- "tasks": [
- "GlmPreTrainedModel",
- "GlmModel",
- "GlmForCausalLM",
- "GlmForSequenceClassification",
- "GlmForTokenClassification"
- ]
- }
- },
- "info.stst.glm-4-0414": {
- "*": {
- "repo": "zai-org/GLM-4-9B-0414",
- "pkg": {
- "0": {
- "transformers": "Glm4Model"
- }
- },
- "tasks": [
- "Glm4PreTrainedModel",
- "Glm4Model",
- "Glm4ForCausalLM",
- "Glm4ForSequenceClassification",
- "Glm4ForTokenClassification"
- ]
- }
- },
- "info.vit.glm-4v-thinking": {
- "*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
- "pkg": {
- "0": {
- "transformers": "Glm46VModel"
- }
- },
- "tasks": [
- "Glm46VModel",
- "Glm46VPreTrainedModel",
- "Glm46VForConditionalGeneration"
- ]
- }
- },
- "info.moe.glm-4-a": {
- "*": {
- "repo": "zai-org/GLM-4.5-Air",
- "pkg": {
- "0": {
- "transformers": "Glm4MoeModel"
- }
- },
- "tasks": [
- "Glm4MoePreTrainedModel",
- "Glm4MoeModel",
- "Glm4MoeForCausalLM"
- ]
- }
- },
- "info.vit.glm-4v": {
- "*": {
- "repo": "zai-org/GLM-4.5V",
- "pkg": {
- "0": {
- "transformers": "Glm4vMoeModel"
- }
- },
- "tasks": [
- "Glm4vMoeForConditionalGeneration",
- "Glm4vMoeModel",
- "Glm4vMoePreTrainedModel",
- "Glm4vMoeTextModel",
- "Glm4vMoeVisionModel"
- ]
- }
- },
- "info.moe.glm-4v": {
- "*": {
- "repo": "zai-org/GLM-4.5V",
- "pkg": {
- "0": {
- "transformers": "Glm4vMoeTextModel"
- }
- },
- "tasks": [
- "Glm4vMoeForConditionalGeneration",
- "Glm4vMoeModel",
- "Glm4vMoePreTrainedModel",
- "Glm4vMoeTextModel",
- "Glm4vMoeVisionModel"
- ]
- }
- },
- "info.stst.glm-4v-thinking": {
- "*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
- "pkg": {
- "0": {
- "transformers": "Glm4vTextModel"
- }
- },
- "tasks": [
- "Glm4vForConditionalGeneration",
- "Glm4vModel",
- "Glm4vPreTrainedModel",
- "Glm4vTextModel",
- "Glm4vVisionModel"
- ]
- }
- },
- "info.stst.glm-asr-nano-2512": {
- "*": {
- "repo": "zai-org/GLM-ASR-Nano-2512",
- "pkg": {
- "0": {
- "transformers": "GlmAsrForConditionalGeneration"
- }
- },
- "tasks": [
- "GlmAsrEncoder",
- "GlmAsrForConditionalGeneration",
- "GlmAsrPreTrainedModel"
- ]
- }
- },
- "info.vit.glpn-kitti": {
- "*": {
- "repo": "vinvino02/glpn-kitti",
- "pkg": {
- "0": {
- "transformers": "GLPNModel"
- }
- },
- "tasks": [
- "GLPNForDepthEstimation",
- "GLPNLayer",
- "GLPNModel",
- "GLPNPreTrainedModel"
- ]
- }
- },
- "info.vit.got-ocr-2-hf": {
- "*": {
- "repo": "stepfun-ai/GOT-OCR-2.0-hf",
- "pkg": {
- "0": {
- "transformers": "GotOcr2Model"
- }
- },
- "tasks": [
- "GotOcr2PreTrainedModel",
- "GotOcr2Model",
- "GotOcr2ForConditionalGeneration"
- ]
- }
- },
- "info.art.gpt2": {
- "*": {
- "repo": "openai-community/gpt2",
- "pkg": {
- "0": {
- "transformers": "GPT2Model"
- }
- },
- "tasks": [
- "GPT2DoubleHeadsModel",
- "GPT2ForQuestionAnswering",
- "GPT2ForSequenceClassification",
- "GPT2ForTokenClassification",
- "GPT2LMHeadModel",
- "GPT2Model",
- "GPT2PreTrainedModel"
- ]
- }
- },
- "info.art.gpt-bigcode-santacoder": {
- "*": {
- "repo": "bigcode/gpt_bigcode-santacoder",
- "pkg": {
- "0": {
- "transformers": "GPTBigCodeModel"
- }
- },
- "tasks": [
- "GPTBigCodeForSequenceClassification",
- "GPTBigCodeForTokenClassification",
- "GPTBigCodeForCausalLM",
- "GPTBigCodeModel",
- "GPTBigCodePreTrainedModel"
- ]
- }
- },
- "info.art.gpt-neo": {
- "*": {
- "repo": "EleutherAI/gpt-neo-1.3B",
- "pkg": {
- "0": {
- "transformers": "GPTNeoModel"
- }
- },
- "tasks": [
- "GPTNeoForCausalLM",
- "GPTNeoForQuestionAnswering",
- "GPTNeoForSequenceClassification",
- "GPTNeoForTokenClassification",
- "GPTNeoModel",
- "GPTNeoPreTrainedModel"
- ]
- }
- },
- "info.stst.gpt-neox": {
- "*": {
- "repo": "EleutherAI/gpt-neox-20b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXModel"
- }
- },
- "tasks": [
- "GPTNeoXForCausalLM",
- "GPTNeoXForQuestionAnswering",
- "GPTNeoXForSequenceClassification",
- "GPTNeoXForTokenClassification",
- "GPTNeoXLayer",
- "GPTNeoXModel",
- "GPTNeoXPreTrainedModel"
- ]
- }
- },
- "info.stst.gpt-neox-japanese": {
- "*": {
- "repo": "abeja/gpt-neox-japanese-2.7b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXJapaneseModel"
- }
- },
- "tasks": [
- "GPTNeoXJapaneseForCausalLM",
- "GPTNeoXJapaneseLayer",
- "GPTNeoXJapaneseModel",
- "GPTNeoXJapanesePreTrainedModel"
- ]
- }
- },
- "info.moe.gpt-oss": {
- "*": {
- "repo": "openai/gpt-oss-120b",
- "pkg": {
- "0": {
- "transformers": "GptOssModel"
- }
- },
- "file_256": [
- "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
- "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
- ],
- "layer_b3": [
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
- "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
- ],
- "layer_256": [
- "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
- "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
- ],
- "tasks": [
- "GptOssForCausalLM",
- "GptOssForSequenceClassification",
- "GptOssForTokenClassification",
- "GptOssModel",
- "GptOssPreTrainedModel"
- ]
- }
- },
- "info.art.gpt-j": {
- "*": {
- "repo": "EleutherAI/gpt-j-6B",
- "pkg": {
- "0": {
- "transformers": "GPTJModel"
- }
- },
- "tasks": [
- "GPTJForCausalLM",
- "GPTJForQuestionAnswering",
- "GPTJForSequenceClassification",
- "GPTJModel",
- "GPTJPreTrainedModel"
- ]
- }
- },
- "info.stst.granite": {
- "*": {
- "repo": "ibm-granite/granite-3.3-2b-base",
- "pkg": {
- "0": {
- "transformers": "GraniteModel"
- }
- },
- "tasks": [
- "GraniteForCausalLM",
- "GraniteModel",
- "GranitePreTrainedModel"
- ]
- }
- },
- "info.moe.powermoe": {
- "*": {
- "repo": "ibm-research/PowerMoE-3b",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeModel"
- }
- },
- "tasks": [
- "GraniteMoeForCausalLM",
- "GraniteMoeModel",
- "GraniteMoePreTrainedModel"
- ]
- }
- },
- "info.ssm.granite-4-h": {
- "*": {
- "repo": "ibm-granite/granite-4.0-h-small",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeHybridModel"
- }
- },
- "tasks": [
- "GraniteMoeHybridForCausalLM",
- "GraniteMoeHybridModel",
- "GraniteMoeHybridPreTrainedModel"
- ]
- }
- },
- "info.moe.moe-active-shared-experts": {
- "*": {
- "repo": "ibm-research/moe-7b-1b-active-shared-experts",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeSharedModel"
- }
- },
- "tasks": [
- "GraniteMoeSharedForCausalLM",
- "GraniteMoeSharedModel",
- "GraniteMoeSharedPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-v1-mistral-hf": {
- "*": {
- "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextModel"
- }
- },
- "tasks": [
- "LlavaNextForConditionalGeneration",
- "LlavaNextPreTrainedModel",
- "LlavaNextModel"
- ]
- }
- },
- "info.detr.grounding-dino": {
- "*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
- "pkg": {
- "0": {
- "transformers": "GroundingDinoModel"
- }
- },
- "tasks": [
- "GroundingDinoForObjectDetection",
- "GroundingDinoModel",
- "GroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.vit.groupvit-gcc-yfcc": {
- "*": {
- "repo": "nvidia/groupvit-gcc-yfcc",
- "pkg": {
- "0": {
- "transformers": "GroupViTModel"
- }
- },
- "tasks": [
- "GroupViTModel",
- "GroupViTPreTrainedModel",
- "GroupViTTextModel",
- "GroupViTVisionModel"
- ]
- }
- },
- "info.stst.helium": {
- "*": {
- "repo": "kyutai/helium-1-2b",
- "pkg": {
- "0": {
- "transformers": "HeliumModel"
- }
- },
- "tasks": [
- "HeliumPreTrainedModel",
- "HeliumModel",
- "HeliumForCausalLM",
- "HeliumForSequenceClassification",
- "HeliumForTokenClassification"
- ]
- }
- },
- "info.vit.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine_x_coco",
- "pkg": {
- "0": {
- "transformers": "HGNetV2Backbone"
- }
- },
- "tasks": [
- "HGNetV2Backbone",
- "HGNetV2PreTrainedModel",
- "HGNetV2ForImageClassification"
- ]
- }
- },
- "info.vit.hiera-224": {
- "*": {
- "repo": "facebook/hiera-base-224-hf",
- "pkg": {
- "0": {
- "transformers": "HieraModel"
- }
- },
- "tasks": [
- "HieraForImageClassification",
- "HieraForPreTraining",
- "HieraBackbone",
- "HieraModel",
- "HieraPreTrainedModel"
- ]
- }
- },
- "info.aet.hubert-ls960": {
- "*": {
- "repo": "facebook/hubert-base-ls960",
- "pkg": {
- "0": {
- "transformers": "HubertModel"
- }
- },
- "tasks": [
- "HubertForCTC",
- "HubertForSequenceClassification",
- "HubertModel",
- "HubertPreTrainedModel"
- ]
- }
- },
- "info.stst.hunyuan": {
- "*": {
- "repo": "tencent/Hunyuan-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "HunYuanDenseV1Model"
- }
- },
- "tasks": [
- "HunYuanDenseV1ForCausalLM",
- "HunYuanDenseV1Model",
- "HunYuanDenseV1PreTrainedModel",
- "HunYuanDenseV1ForSequenceClassification"
- ]
- }
- },
- "info.moe.hunyuan-a": {
- "*": {
- "repo": "tencent/Hunyuan-A13B-Instruct",
- "pkg": {
- "0": {
- "transformers": "HunYuanMoEV1Model"
- }
- },
- "tasks": [
- "HunYuanMoEV1ForCausalLM",
- "HunYuanMoEV1Model",
- "HunYuanMoEV1PreTrainedModel",
- "HunYuanMoEV1ForSequenceClassification"
- ]
- }
- },
- "info.art.ibert-roberta": {
- "*": {
- "repo": "kssteven/ibert-roberta-base",
- "pkg": {
- "0": {
- "transformers": "IBertModel"
- }
- },
- "tasks": [
- "IBertForMaskedLM",
- "IBertForMultipleChoice",
- "IBertForQuestionAnswering",
- "IBertForSequenceClassification",
- "IBertForTokenClassification",
- "IBertModel",
- "IBertPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics": {
- "*": {
- "repo": "HuggingFaceM4/idefics-9b",
- "pkg": {
- "0": {
- "transformers": "IdeficsModel"
- }
- },
- "tasks": [
- "IdeficsForVisionText2Text",
- "IdeficsModel",
- "IdeficsPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics2": {
- "*": {
- "repo": "HuggingFaceM4/idefics2-8b",
- "pkg": {
- "0": {
- "transformers": "Idefics2Model"
- }
- },
- "tasks": [
- "Idefics2ForConditionalGeneration",
- "Idefics2PreTrainedModel",
- "Idefics2Model"
- ]
- }
- },
- "info.vit.idefics3-llama3": {
- "*": {
- "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
- "pkg": {
- "0": {
- "transformers": "Idefics3Model"
- }
- },
- "tasks": [
- "Idefics3ForConditionalGeneration",
- "Idefics3PreTrainedModel",
- "Idefics3Model",
- "Idefics3VisionTransformer"
- ]
- }
- },
- "info.vit.siglip-patch16-224": {
- "*": {
- "repo": "google/siglip-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Idefics3VisionTransformer"
- }
- },
- "tasks": [
- "Idefics3ForConditionalGeneration",
- "Idefics3PreTrainedModel",
- "Idefics3Model",
- "Idefics3VisionTransformer"
- ]
- }
- },
- "info.vit.ijepa-vith14": {
- "*": {
- "repo": "facebook/ijepa_vith14_1k",
- "pkg": {
- "0": {
- "transformers": "IJepaModel"
- }
- },
- "tasks": [
- "IJepaPreTrainedModel",
- "IJepaModel",
- "IJepaForImageClassification"
- ]
- }
- },
- "info.art.imagegpt": {
- "*": {
- "repo": "openai/imagegpt-small",
- "pkg": {
- "0": {
- "transformers": "ImageGPTModel"
- }
- },
- "tasks": [
- "ImageGPTForCausalImageModeling",
- "ImageGPTForImageClassification",
- "ImageGPTModel",
- "ImageGPTPreTrainedModel"
- ]
- }
- },
- "info.stst.informer-tourism-monthly": {
- "*": {
- "repo": "huggingface/informer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "InformerModel"
- }
- },
- "tasks": [
- "InformerForPrediction",
- "InformerModel",
- "InformerPreTrainedModel"
- ]
- }
- },
- "info.vit.blip-flan-t5": {
- "*": {
- "repo": "Salesforce/instructblip-flan-t5-xl",
- "pkg": {
- "0": {
- "transformers": "InstructBlipModel"
- }
- },
- "tasks": [
- "InstructBlipQFormerModel",
- "InstructBlipPreTrainedModel",
- "InstructBlipModel",
- "InstructBlipForConditionalGeneration",
- "InstructBlipVisionModel"
- ]
- }
- },
- "info.vit.internvl3-hf": {
- "*": {
- "repo": "OpenGVLab/InternVL3-1B-hf",
- "pkg": {
- "0": {
- "transformers": "InternVLModel"
- }
- },
- "tasks": [
- "InternVLVisionPreTrainedModel",
- "InternVLVisionModel",
- "InternVLPreTrainedModel",
- "InternVLModel",
- "InternVLForConditionalGeneration"
- ]
- }
- },
- "info.stst.jais-2-chat": {
- "*": {
- "repo": "inceptionai/Jais-2-8B-Chat",
- "pkg": {
- "0": {
- "transformers": "Jais2Model"
- }
- },
- "tasks": [
- "Jais2Model",
- "Jais2ForCausalLM",
- "Jais2PreTrainedModel"
- ]
- }
- },
- "info.ssm.jamba-v0": {
- "*": {
- "repo": "ai21labs/Jamba-v0.1",
- "pkg": {
- "0": {
- "transformers": "JambaModel"
- }
- },
- "tasks": [
- "JambaForCausalLM",
- "JambaForSequenceClassification",
- "JambaModel",
- "JambaPreTrainedModel"
- ]
- }
- },
- "info.vit.janus": {
- "*": {
- "repo": "deepseek-community/Janus-Pro-1B",
- "pkg": {
- "0": {
- "transformers": "JanusModel"
- }
- },
- "tasks": [
- "JanusPreTrainedModel",
- "JanusForConditionalGeneration",
- "JanusModel",
- "JanusVQVAE",
- "JanusVisionModel"
- ]
- }
- },
- "info.moe.jetmoe": {
- "*": {
- "repo": "jetmoe/jetmoe-8b",
- "pkg": {
- "0": {
- "transformers": "JetMoeModel"
- }
- },
- "tasks": [
- "JetMoeForCausalLM",
- "JetMoeModel",
- "JetMoePreTrainedModel",
- "JetMoeForSequenceClassification"
- ]
- }
- },
- "info.vit.kosmos-2-patch14-224": {
- "*": {
- "repo": "microsoft/kosmos-2-patch14-224",
- "pkg": {
- "0": {
- "transformers": "Kosmos2Model"
- }
- },
- "tasks": [
- "Kosmos2ForConditionalGeneration",
- "Kosmos2Model",
- "Kosmos2PreTrainedModel"
- ]
- }
- },
- "info.vit.kosmos-2": {
- "*": {
- "repo": "microsoft/kosmos-2.5",
- "pkg": {
- "0": {
- "transformers": "Kosmos2_5Model"
- }
- },
- "tasks": [
- "Kosmos2_5ForConditionalGeneration",
- "Kosmos2_5Model",
- "Kosmos2_5PreTrainedModel"
- ]
- }
- },
- "info.stst.stt-en-trfs": {
- "*": {
- "repo": "kyutai/stt-2.6b-en-trfs",
- "pkg": {
- "0": {
- "transformers": "KyutaiSpeechToTextModel"
- }
- },
- "tasks": [
- "KyutaiSpeechToTextPreTrainedModel",
- "KyutaiSpeechToTextModel",
- "KyutaiSpeechToTextForConditionalGeneration"
- ]
- }
- },
- "info.aet.todo": {
- "*": {
- "repo": "TODO/TODO",
- "pkg": {
- "0": {
- "transformers": "LasrForCTC"
- }
- },
- "tasks": [
- "LasrForCTC",
- "LasrEncoder",
- "LasrPreTrainedModel"
- ]
- }
- },
- "info.stst.todo": {
- "*": {
- "repo": "TODO/TODO",
- "pkg": {
- "0": {
- "transformers": "LasrEncoder"
- }
- },
- "tasks": [
- "LasrForCTC",
- "LasrEncoder",
- "LasrPreTrainedModel"
- ]
- }
- },
- "info.art.layoutlm-uncased": {
- "*": {
- "repo": "microsoft/layoutlm-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMModel"
- }
- },
- "tasks": [
- "LayoutLMForMaskedLM",
- "LayoutLMForSequenceClassification",
- "LayoutLMForTokenClassification",
- "LayoutLMForQuestionAnswering",
- "LayoutLMModel",
- "LayoutLMPreTrainedModel"
- ]
- }
- },
- "info.art.layoutlmv2-uncased": {
- "*": {
- "repo": "microsoft/layoutlmv2-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv2Model"
- }
- },
- "tasks": [
- "LayoutLMv2ForQuestionAnswering",
- "LayoutLMv2ForSequenceClassification",
- "LayoutLMv2ForTokenClassification",
- "LayoutLMv2Layer",
- "LayoutLMv2Model",
- "LayoutLMv2PreTrainedModel"
- ]
- }
- },
- "info.vit.layoutlmv3": {
- "*": {
- "repo": "microsoft/layoutlmv3-base",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv3Model"
- }
- },
- "tasks": [
- "LayoutLMv3ForQuestionAnswering",
- "LayoutLMv3ForSequenceClassification",
- "LayoutLMv3ForTokenClassification",
- "LayoutLMv3Model",
- "LayoutLMv3PreTrainedModel"
- ]
- }
- },
- "info.stst.led-16384": {
- "*": {
- "repo": "allenai/led-base-16384",
- "pkg": {
- "0": {
- "transformers": "LEDModel"
- }
- },
- "tasks": [
- "LEDForConditionalGeneration",
- "LEDForQuestionAnswering",
- "LEDForSequenceClassification",
- "LEDModel",
- "LEDPreTrainedModel"
- ]
- }
- },
- "info.gan.levit-128s": {
- "*": {
- "repo": "facebook/levit-128S",
- "pkg": {
- "0": {
- "transformers": "LevitModel"
- }
- },
- "tasks": [
- "LevitForImageClassification",
- "LevitForImageClassificationWithTeacher",
- "LevitModel",
- "LevitPreTrainedModel"
- ]
- }
- },
- "info.stst.lfm": {
- "*": {
- "repo": "LiquidAI/LFM2-1.2B",
- "pkg": {
- "0": {
- "transformers": "Lfm2Model"
- }
- },
- "tasks": [
- "Lfm2ForCausalLM",
- "Lfm2Model",
- "Lfm2PreTrainedModel"
- ]
- }
- },
- "info.moe.lfm2-a": {
- "*": {
- "repo": "LiquidAI/LFM2-8B-A1B",
- "pkg": {
- "0": {
- "transformers": "Lfm2MoeModel"
- }
- },
- "tasks": [
- "Lfm2MoeForCausalLM",
- "Lfm2MoeModel",
- "Lfm2MoePreTrainedModel"
- ]
- }
- },
- "info.vit.lfm2-vl": {
- "*": {
- "repo": "LiquidAI/LFM2-VL-1.6B",
- "pkg": {
- "0": {
- "transformers": "Lfm2VlModel"
- }
- },
- "tasks": [
- "Lfm2VlForConditionalGeneration",
- "Lfm2VlPreTrainedModel",
- "Lfm2VlModel"
- ]
- }
- },
- "info.aet.lightglue-superpoint": {
- "*": {
- "repo": "ETH-CVG/lightglue_superpoint",
- "pkg": {
- "0": {
- "transformers": "LightGlueForKeypointMatching"
- }
- },
- "tasks": [
- "LightGluePreTrainedModel",
- "LightGlueForKeypointMatching"
- ]
- }
- },
- "info.art.lilt-roberta-en": {
- "*": {
- "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
- "pkg": {
- "0": {
- "transformers": "LiltModel"
- }
- },
- "tasks": [
- "LiltForQuestionAnswering",
- "LiltForSequenceClassification",
- "LiltForTokenClassification",
- "LiltModel",
- "LiltPreTrainedModel"
- ]
- }
- },
- "info.vit.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4ForConditionalGeneration"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.moe.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4TextModel"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.vit.llava": {
- "*": {
- "repo": "llava-hf/llava-9b",
- "pkg": {
- "0": {
- "transformers": "LlavaModel"
- }
- },
- "file_256": [
- "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
- ],
- "layer_b3": [
- "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
- ],
- "layer_256": [
- "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
- ],
- "tasks": [
- "LlavaForConditionalGeneration",
- "LlavaPreTrainedModel",
- "LlavaModel"
- ]
- }
- },
- "info.vit.llava-next-video-hf": {
- "*": {
- "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextVideoModel"
- }
- },
- "tasks": [
- "LlavaNextVideoForConditionalGeneration",
- "LlavaNextVideoModel",
- "LlavaNextVideoPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-onevision-qwen2-ov-hf": {
- "*": {
- "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaOnevisionModel"
- }
- },
- "tasks": [
- "LlavaOnevisionModel",
- "LlavaOnevisionForConditionalGeneration",
- "LlavaOnevisionPreTrainedModel"
- ]
- }
- },
- "info.stst.longcat-flash-chat": {
- "*": {
- "repo": "meituan-longcat/LongCat-Flash-Chat",
- "pkg": {
- "0": {
- "transformers": "LongcatFlashModel"
- }
- },
- "tasks": [
- "LongcatFlashPreTrainedModel",
- "LongcatFlashModel",
- "LongcatFlashForCausalLM"
- ]
- }
- },
- "info.art.longformer-4096": {
- "*": {
- "repo": "allenai/longformer-base-4096",
- "pkg": {
- "0": {
- "transformers": "LongformerModel"
- }
- },
- "tasks": [
- "LongformerForMaskedLM",
- "LongformerForMultipleChoice",
- "LongformerForQuestionAnswering",
- "LongformerForSequenceClassification",
- "LongformerForTokenClassification",
- "LongformerModel",
- "LongformerPreTrainedModel",
- "LongformerSelfAttention"
- ]
- }
- },
- "info.stst.long-t5-local": {
- "*": {
- "repo": "google/long-t5-local-base",
- "pkg": {
- "0": {
- "transformers": "LongT5Model"
- }
- },
- "tasks": [
- "LongT5EncoderModel",
- "LongT5ForConditionalGeneration",
- "LongT5Model",
- "LongT5PreTrainedModel"
- ]
- }
- },
- "info.art.luke": {
- "*": {
- "repo": "studio-ousia/luke-base",
- "pkg": {
- "0": {
- "transformers": "LukeModel"
- }
- },
- "tasks": [
- "LukeForEntityClassification",
- "LukeForEntityPairClassification",
- "LukeForEntitySpanClassification",
- "LukeForMultipleChoice",
- "LukeForQuestionAnswering",
- "LukeForSequenceClassification",
- "LukeForTokenClassification",
- "LukeForMaskedLM",
- "LukeModel",
- "LukePreTrainedModel"
- ]
- }
- },
- "info.art.lxmert-uncased": {
- "*": {
- "repo": "unc-nlp/lxmert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LxmertModel"
- }
- },
- "tasks": [
- "LxmertEncoder",
- "LxmertForPreTraining",
- "LxmertForQuestionAnswering",
- "LxmertModel",
- "LxmertPreTrainedModel",
- "LxmertVisualFeatureEncoder",
- "LxmertXLayer"
- ]
- }
- },
- "info.stst.m": {
- "*": {
- "repo": "facebook/m2m100_418M",
- "pkg": {
- "0": {
- "transformers": "M2M100Model"
- }
- },
- "tasks": [
- "M2M100ForConditionalGeneration",
- "M2M100Model",
- "M2M100PreTrainedModel"
- ]
- }
- },
- "info.ssm.mamba": {
- "*": {
- "repo": "state-spaces/mamba-2.8b",
- "pkg": {
- "0": {
- "transformers": "MambaModel"
- }
- },
- "tasks": [
- "MambaForCausalLM",
- "MambaModel",
- "MambaPreTrainedModel",
- "MambaCache"
- ]
- }
- },
- "info.ssm.mamba2": {
- "*": {
- "repo": "AntonV/mamba2-2.7b-hf",
- "pkg": {
- "0": {
- "transformers": "Mamba2Model"
- }
- },
- "tasks": [
- "Mamba2ForCausalLM",
- "Mamba2Model",
- "Mamba2PreTrainedModel"
- ]
- }
- },
- "info.stst.opus-mt-en-de": {
- "*": {
- "repo": "Helsinki-NLP/opus-mt-en-de",
- "pkg": {
- "0": {
- "transformers": "MarianModel"
- }
- },
- "tasks": [
- "MarianForCausalLM",
- "MarianModel",
- "MarianMTModel",
- "MarianPreTrainedModel"
- ]
- }
- },
- "info.art.markuplm": {
- "*": {
- "repo": "microsoft/markuplm-base",
- "pkg": {
- "0": {
- "transformers": "MarkupLMModel"
- }
- },
- "tasks": [
- "MarkupLMForQuestionAnswering",
- "MarkupLMForSequenceClassification",
- "MarkupLMForTokenClassification",
- "MarkupLMModel",
- "MarkupLMPreTrainedModel"
- ]
- }
- },
- "info.detr.mask2former-swin-coco-instance": {
- "*": {
- "repo": "facebook/mask2former-swin-small-coco-instance",
- "pkg": {
- "0": {
- "transformers": "Mask2FormerModel"
- }
- },
- "tasks": [
- "Mask2FormerForUniversalSegmentation",
- "Mask2FormerModel",
- "Mask2FormerPreTrainedModel"
- ]
- }
- },
- "info.detr.maskformer-swin-ade": {
- "*": {
- "repo": "facebook/maskformer-swin-base-ade",
- "pkg": {
- "0": {
- "transformers": "MaskFormerModel"
- }
- },
- "tasks": [
- "MaskFormerForInstanceSegmentation",
- "MaskFormerModel",
- "MaskFormerPreTrainedModel"
- ]
- }
- },
- "info.vit.swin-patch4-window7-224": {
- "*": {
- "repo": "microsoft/swin-tiny-patch4-window7-224",
- "pkg": {
- "0": {
- "transformers": "MaskFormerSwinModel"
- }
- },
- "tasks": [
- "MaskFormerSwinBackbone",
- "MaskFormerSwinModel",
- "MaskFormerSwinPreTrainedModel"
- ]
- }
- },
- "info.stst.mbart-cc25": {
- "*": {
- "repo": "facebook/mbart-large-cc25",
- "pkg": {
- "0": {
- "transformers": "MBartModel"
- }
- },
- "tasks": [
- "MBartForCausalLM",
- "MBartForConditionalGeneration",
- "MBartForQuestionAnswering",
- "MBartForSequenceClassification",
- "MBartModel",
- "MBartPreTrainedModel"
- ]
- }
- },
- "info.art.megatron-bert-uncased": {
- "*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
- "pkg": {
- "0": {
- "transformers": "MegatronBertModel"
- }
- },
- "tasks": [
- "MegatronBertForCausalLM",
- "MegatronBertForMaskedLM",
- "MegatronBertForMultipleChoice",
- "MegatronBertForNextSentencePrediction",
- "MegatronBertForPreTraining",
- "MegatronBertForQuestionAnswering",
- "MegatronBertForSequenceClassification",
- "MegatronBertForTokenClassification",
- "MegatronBertModel",
- "MegatronBertPreTrainedModel"
- ]
- }
- },
- "info.vit.metaclip-2-worldwide-huge-quickgelu": {
- "*": {
- "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
- "pkg": {
- "0": {
- "transformers": "MetaClip2Model"
- }
- },
- "tasks": [
- "MetaClip2Model",
- "MetaClip2PreTrainedModel",
- "MetaClip2TextModel",
- "MetaClip2TextModelWithProjection",
- "MetaClip2VisionModel",
- "MetaClip2VisionModelWithProjection",
- "MetaClip2ForImageClassification"
- ]
- }
- },
- "info.vit.mgp-str": {
- "*": {
- "repo": "alibaba-damo/mgp-str-base",
- "pkg": {
- "0": {
- "transformers": "MgpstrForSceneTextRecognition"
- }
- },
- "tasks": [
- "MgpstrModel",
- "MgpstrPreTrainedModel",
- "MgpstrForSceneTextRecognition"
- ]
- }
- },
- "info.gan.mimi": {
- "*": {
- "repo": "kyutai/mimi",
- "pkg": {
- "0": {
- "transformers": "MimiModel"
- }
- },
- "tasks": [
- "MimiModel",
- "MimiPreTrainedModel"
- ]
- }
- },
- "info.moe.max-text-01-hf": {
- "*": {
- "repo": "MiniMaxAI/MiniMax-Text-01-hf",
- "pkg": {
- "0": {
- "transformers": "MiniMaxModel"
- }
- },
- "tasks": [
- "MiniMaxPreTrainedModel",
- "MiniMaxModel",
- "MiniMaxForCausalLM",
- "MiniMaxForSequenceClassification",
- "MiniMaxForTokenClassification",
- "MiniMaxForQuestionAnswering"
- ]
- }
- },
- "info.stst.stral-2410": {
- "*": {
- "repo": "mistralai/Ministral-8B-Instruct-2410",
- "pkg": {
- "0": {
- "transformers": "MinistralModel"
- }
- },
- "tasks": [
- "MinistralPreTrainedModel",
- "MinistralModel",
- "MinistralForCausalLM",
- "MinistralForSequenceClassification",
- "MinistralForTokenClassification",
- "MinistralForQuestionAnswering"
- ]
- }
- },
- "info.stst.stral-3-2512": {
- "*": {
- "repo": "mistralai/Ministral-3-8B-Base-2512",
- "pkg": {
- "0": {
- "transformers": "Ministral3Model"
- }
- },
- "tasks": [
- "Ministral3ForCausalLM",
- "Ministral3ForQuestionAnswering",
- "Ministral3Model",
- "Ministral3PreTrainedModel",
- "Ministral3ForSequenceClassification",
- "Ministral3ForTokenClassification"
- ]
- }
- },
- "info.stst.mistral-v0": {
- "*": {
- "repo": "mistralai/Mistral-7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MistralModel"
- }
- },
- "tasks": [
- "MistralForCausalLM",
- "MistralForQuestionAnswering",
- "MistralModel",
- "MistralPreTrainedModel",
- "MistralForSequenceClassification",
- "MistralForTokenClassification"
- ]
- }
- },
- "info.vit.mistral-3-2503": {
- "*": {
- "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
- "pkg": {
- "0": {
- "transformers": "Mistral3Model"
- }
- },
- "tasks": [
- "Mistral3Model",
- "Mistral3PreTrainedModel",
- "Mistral3ForConditionalGeneration"
- ]
- }
- },
- "info.moe.mixtral-8x": {
- "*": {
- "repo": "mistralai/Mixtral-8x7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MixtralModel"
- }
- },
- "tasks": [
- "MixtralForCausalLM",
- "MixtralForQuestionAnswering",
- "MixtralModel",
- "MixtralPreTrainedModel",
- "MixtralForSequenceClassification",
- "MixtralForTokenClassification"
- ]
- }
- },
- "info.vit.mlcd-vit-bigg-patch14-336": {
- "*": {
- "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
- "pkg": {
- "0": {
- "transformers": "MLCDVisionModel"
- }
- },
- "tasks": [
- "MLCDPreTrainedModel",
- "MLCDVisionModel"
- ]
- }
- },
- "info.vit.llama-3-vision": {
- "*": {
- "repo": "meta-llama/Llama-3.2-11B-Vision",
- "pkg": {
- "0": {
- "transformers": "MllamaModel"
- }
- },
- "tasks": [
- "MllamaForConditionalGeneration",
- "MllamaForCausalLM",
- "MllamaTextModel",
- "MllamaVisionModel",
- "MllamaPreTrainedModel",
- "MllamaModel"
- ]
- }
- },
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
- "*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
- "pkg": {
- "0": {
- "transformers": "MMGroundingDinoModel"
- }
- },
- "tasks": [
- "MMGroundingDinoForObjectDetection",
- "MMGroundingDinoModel",
- "MMGroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.art.mobilebert-uncased": {
- "*": {
- "repo": "google/mobilebert-uncased",
- "pkg": {
- "0": {
- "transformers": "MobileBertModel"
- }
- },
- "tasks": [
- "MobileBertForMaskedLM",
- "MobileBertForMultipleChoice",
- "MobileBertForNextSentencePrediction",
- "MobileBertForPreTraining",
- "MobileBertForQuestionAnswering",
- "MobileBertForSequenceClassification",
- "MobileBertForTokenClassification",
- "MobileBertLayer",
- "MobileBertModel",
- "MobileBertPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v1-1--224": {
- "*": {
- "repo": "google/mobilenet_v1_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV1Model"
- }
- },
- "tasks": [
- "MobileNetV1ForImageClassification",
- "MobileNetV1Model",
- "MobileNetV1PreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v2-1--224": {
- "*": {
- "repo": "google/mobilenet_v2_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV2Model"
- }
- },
- "tasks": [
- "MobileNetV2ForImageClassification",
- "MobileNetV2ForSemanticSegmentation",
- "MobileNetV2Model",
- "MobileNetV2PreTrainedModel"
- ]
- }
- },
- "info.vit.mobilevit": {
- "*": {
- "repo": "apple/mobilevit-small",
- "pkg": {
- "0": {
- "transformers": "MobileViTModel"
- }
- },
- "tasks": [
- "MobileViTForImageClassification",
- "MobileViTForSemanticSegmentation",
- "MobileViTModel",
- "MobileViTPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilevitv2-1": {
- "*": {
- "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
- "pkg": {
- "0": {
- "transformers": "MobileViTV2Model"
- }
- },
- "tasks": [
- "MobileViTV2ForImageClassification",
- "MobileViTV2ForSemanticSegmentation",
- "MobileViTV2Model",
- "MobileViTV2PreTrainedModel"
- ]
- }
- },
- "info.aet.modernbert": {
- "*": {
- "repo": "answerdotai/ModernBERT-base",
- "pkg": {
- "0": {
- "transformers": "ModernBertModel"
- }
- },
- "tasks": [
- "ModernBertModel",
- "ModernBertPreTrainedModel",
- "ModernBertForMaskedLM",
- "ModernBertForSequenceClassification",
- "ModernBertForTokenClassification",
- "ModernBertForQuestionAnswering",
- "ModernBertForMultipleChoice"
- ]
- }
- },
- "info.aet.test-dec": {
- "*": {
- "repo": "blab-jhu/test-32m-dec",
- "pkg": {
- "0": {
- "transformers": "ModernBertDecoderModel"
- }
- },
- "tasks": [
- "ModernBertDecoderModel",
- "ModernBertDecoderPreTrainedModel",
- "ModernBertDecoderForCausalLM",
- "ModernBertDecoderForSequenceClassification"
- ]
- }
- },
- "info.stst.moonshine": {
- "*": {
- "repo": "UsefulSensors/moonshine-tiny",
- "pkg": {
- "0": {
- "transformers": "MoonshineModel"
- }
- },
- "tasks": [
- "MoonshineModel",
- "MoonshinePreTrainedModel",
- "MoonshineForConditionalGeneration"
- ]
- }
- },
- "info.stst.hf-moshiko": {
- "*": {
- "repo": "kmhf/hf-moshiko",
- "pkg": {
- "0": {
- "transformers": "MoshiModel"
- }
- },
- "tasks": [
- "MoshiForCausalLM",
- "MoshiForConditionalGeneration",
- "MoshiModel",
- "MoshiPreTrainedModel"
- ]
- }
- },
- "info.art.mpnet": {
- "*": {
- "repo": "microsoft/mpnet-base",
- "pkg": {
- "0": {
- "transformers": "MPNetModel"
- }
- },
- "tasks": [
- "MPNetForMaskedLM",
- "MPNetForMultipleChoice",
- "MPNetForQuestionAnswering",
- "MPNetForSequenceClassification",
- "MPNetForTokenClassification",
- "MPNetLayer",
- "MPNetModel",
- "MPNetPreTrainedModel"
- ]
- }
- },
- "info.art.mpt": {
- "*": {
- "repo": "mosaicml/mpt-7b",
- "pkg": {
- "0": {
- "transformers": "MptModel"
- }
- },
- "tasks": [
- "MptForCausalLM",
- "MptModel",
- "MptPreTrainedModel",
- "MptForSequenceClassification",
- "MptForTokenClassification",
- "MptForQuestionAnswering"
- ]
- }
- },
- "info.art.mra-512-4": {
- "*": {
- "repo": "uw-madison/mra-base-512-4",
- "pkg": {
- "0": {
- "transformers": "MraModel"
- }
- },
- "tasks": [
- "MraForMaskedLM",
- "MraForMultipleChoice",
- "MraForQuestionAnswering",
- "MraForSequenceClassification",
- "MraForTokenClassification",
- "MraLayer",
- "MraModel",
- "MraPreTrainedModel"
- ]
- }
- },
- "info.stst.mt5": {
- "*": {
- "repo": "google/mt5-small",
- "pkg": {
- "0": {
- "transformers": "MT5Model"
- }
- },
- "identifiers": [
- [
- 250112,
- 2048
- ],
- "text_encoders.mt5xl.transformer.shared.weight"
- ],
- "file_256": [
- "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
- "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
- ],
- "layer_b3": [
- "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51"
- ],
- "layer_256": [
- "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
- "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
- ],
- "tasks": [
- "MT5EncoderModel",
- "MT5ForConditionalGeneration",
- "MT5ForQuestionAnswering",
- "MT5ForSequenceClassification",
- "MT5ForTokenClassification",
- "MT5Model",
- "MT5PreTrainedModel"
- ]
- }
- },
- "info.art.musicgen": {
- "*": {
- "repo": "facebook/musicgen-small",
- "pkg": {
- "0": {
- "transformers": "MusicgenModel"
- }
- },
- "tasks": [
- "MusicgenForConditionalGeneration",
- "MusicgenForCausalLM",
- "MusicgenModel",
- "MusicgenPreTrainedModel"
- ]
- }
- },
- "info.art.musicgen-melody": {
- "*": {
- "repo": "facebook/musicgen-melody",
- "pkg": {
- "0": {
- "transformers": "MusicgenMelodyModel"
- }
- },
- "tasks": [
- "MusicgenMelodyForConditionalGeneration",
- "MusicgenMelodyForCausalLM",
- "MusicgenMelodyModel",
- "MusicgenMelodyPreTrainedModel"
- ]
- }
- },
- "info.stst.mvp": {
- "*": {
- "repo": "RUCAIBox/mvp",
- "pkg": {
- "0": {
- "transformers": "MvpModel"
- }
- },
- "tasks": [
- "MvpForCausalLM",
- "MvpForConditionalGeneration",
- "MvpForQuestionAnswering",
- "MvpForSequenceClassification",
- "MvpModel",
- "MvpPreTrainedModel"
- ]
- }
- },
- "info.stst.nanochat-d32": {
- "*": {
- "repo": "karpathy/nanochat-d32",
- "pkg": {
- "0": {
- "transformers": "NanoChatModel"
- }
- },
- "tasks": [
- "NanoChatPreTrainedModel",
- "NanoChatModel",
- "NanoChatForCausalLM"
- ]
- }
- },
- "info.stst.nemotron-3-hf": {
- "*": {
- "repo": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
- "pkg": {
- "0": {
- "transformers": "NemotronModel"
- }
- },
- "tasks": [
- "NemotronForQuestionAnswering",
- "NemotronForCausalLM",
- "NemotronModel",
- "NemotronPreTrainedModel",
- "NemotronForSequenceClassification",
- "NemotronForTokenClassification"
- ]
- }
- },
- "info.moe.nllb-moe": {
- "*": {
- "repo": "facebook/nllb-moe-54b",
- "pkg": {
- "0": {
- "transformers": "NllbMoeModel"
- }
- },
- "tasks": [
- "NllbMoeForConditionalGeneration",
- "NllbMoeModel",
- "NllbMoePreTrainedModel",
- "NllbMoeTop2Router",
- "NllbMoeSparseMLP"
- ]
- }
- },
- "info.art.nystromformer-512": {
- "*": {
- "repo": "uw-madison/nystromformer-512",
- "pkg": {
- "0": {
- "transformers": "NystromformerModel"
- }
- },
- "tasks": [
- "NystromformerForMaskedLM",
- "NystromformerForMultipleChoice",
- "NystromformerForQuestionAnswering",
- "NystromformerForSequenceClassification",
- "NystromformerForTokenClassification",
- "NystromformerLayer",
- "NystromformerModel",
- "NystromformerPreTrainedModel"
- ]
- }
- },
- "info.stst.olmo-hf": {
- "*": {
- "repo": "allenai/OLMo-7B-hf",
- "pkg": {
- "0": {
- "transformers": "OlmoModel"
- }
- },
- "tasks": [
- "OlmoForCausalLM",
- "OlmoModel",
- "OlmoPreTrainedModel"
- ]
- }
- },
- "info.stst.olmo2-1124-hf": {
- "*": {
- "repo": "allenai/Olmo-2-1124-7B",
- "pkg": {
- "0": {
- "transformers": "Olmo2Model"
- }
- },
- "tasks": [
- "Olmo2ForCausalLM",
- "Olmo2Model",
- "Olmo2PreTrainedModel"
- ]
- }
- },
- "info.stst.olmo-3-0725": {
- "*": {
- "repo": "allenai/OLMo-3-0725-1B",
- "pkg": {
- "0": {
- "transformers": "Olmo3Model"
- }
- },
- "tasks": [
- "Olmo3ForCausalLM",
- "Olmo3Model",
- "Olmo3PreTrainedModel"
- ]
- }
- },
- "info.moe.olmoe-0924": {
- "*": {
- "repo": "allenai/OLMoE-1B-7B-0924",
- "pkg": {
- "0": {
- "transformers": "OlmoeModel"
- }
- },
- "tasks": [
- "OlmoeForCausalLM",
- "OlmoeModel",
- "OlmoePreTrainedModel"
- ]
- }
- },
- "info.detr.omdet-turbo-swin-hf": {
- "*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
- "pkg": {
- "0": {
- "transformers": "OmDetTurboForObjectDetection"
- }
- },
- "tasks": [
- "OmDetTurboForObjectDetection",
- "OmDetTurboPreTrainedModel"
- ]
- }
- },
- "info.detr.oneformer-ade-swin": {
- "*": {
- "repo": "shi-labs/oneformer_ade20k_swin_tiny",
- "pkg": {
- "0": {
- "transformers": "OneFormerModel"
- }
- },
- "tasks": [
- "OneFormerForUniversalSegmentation",
- "OneFormerModel",
- "OneFormerPreTrainedModel"
- ]
- }
- },
- "info.art.openai-gpt": {
- "*": {
- "repo": "openai-community/openai-gpt",
- "pkg": {
- "0": {
- "transformers": "OpenAIGPTModel"
- }
- },
- "tasks": [
- "OpenAIGPTDoubleHeadsModel",
- "OpenAIGPTForSequenceClassification",
- "OpenAIGPTLMHeadModel",
- "OpenAIGPTModel",
- "OpenAIGPTPreTrainedModel"
- ]
- }
- },
- "info.art.opt": {
- "*": {
- "repo": "facebook/opt-350m",
- "pkg": {
- "0": {
- "transformers": "OPTModel"
- }
- },
- "tasks": [
- "OPTForCausalLM",
- "OPTModel",
- "OPTPreTrainedModel",
- "OPTForSequenceClassification",
- "OPTForQuestionAnswering"
- ]
- }
- },
- "info.vit.ovis2-hf": {
- "*": {
- "repo": "thisisiron/Ovis2-1B-hf",
- "pkg": {
- "0": {
- "transformers": "Ovis2Model"
- }
- },
- "tasks": [
- "Ovis2PreTrainedModel",
- "Ovis2Model",
- "Ovis2ForConditionalGeneration"
- ]
- }
- },
- "info.vit.owlv2-patch16": {
- "*": {
- "repo": "google/owlv2-base-patch16",
- "pkg": {
- "0": {
- "transformers": "Owlv2Model"
- }
- },
- "tasks": [
- "Owlv2Model",
- "Owlv2PreTrainedModel",
- "Owlv2TextModel",
- "Owlv2VisionModel",
- "Owlv2ForObjectDetection"
- ]
- }
- },
- "info.vit.owlvit-patch32": {
- "*": {
- "repo": "google/owlvit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "OwlViTModel"
- }
- },
- "tasks": [
- "OwlViTModel",
- "OwlViTPreTrainedModel",
- "OwlViTTextModel",
- "OwlViTVisionModel",
- "OwlViTForObjectDetection"
- ]
- }
- },
- "info.vit.paligemma": {
- "*": {
- "repo": "google/paligemma2-3b-mix-224",
- "pkg": {
- "0": {
- "transformers": "PaliGemmaModel"
- }
- },
- "tasks": [
- "PaliGemmaForConditionalGeneration",
- "PaliGemmaPreTrainedModel",
- "PaliGemmaModel"
- ]
- }
- },
- "info.aet.parakeet-ctc-b": {
- "*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
- "pkg": {
- "0": {
- "transformers": "ParakeetForCTC"
- }
- },
- "tasks": [
- "ParakeetForCTC",
- "ParakeetEncoder",
- "ParakeetPreTrainedModel"
- ]
- }
- },
- "info.stst.parakeet-ctc-b": {
- "*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
- "pkg": {
- "0": {
- "transformers": "ParakeetEncoder"
- }
- },
- "tasks": [
- "ParakeetForCTC",
- "ParakeetEncoder",
- "ParakeetPreTrainedModel"
- ]
- }
- },
- "info.mlp.patchtsmixer-etth1-pretrain": {
- "*": {
- "repo": "ibm/patchtsmixer-etth1-pretrain",
- "pkg": {
- "0": {
- "transformers": "PatchTSMixerModel"
- }
- },
- "tasks": [
- "PatchTSMixerPreTrainedModel",
- "PatchTSMixerModel",
- "PatchTSMixerForPretraining",
- "PatchTSMixerForPrediction",
- "PatchTSMixerForTimeSeriesClassification",
- "PatchTSMixerForRegression"
- ]
- }
- },
- "info.art.patchtst": {
- "*": {
- "repo": "ibm/patchtst",
- "pkg": {
- "0": {
- "transformers": "PatchTSTModel"
- }
- },
- "tasks": [
- "PatchTSTModel",
- "PatchTSTPreTrainedModel",
- "PatchTSTForPrediction",
- "PatchTSTForPretraining",
- "PatchTSTForRegression",
- "PatchTSTForClassification"
- ]
- }
- },
- "info.stst.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeAudioModel"
- }
- },
- "tasks": [
- "PeAudioFrameLevelModel",
- "PeAudioModel",
- "PeAudioEncoder"
- ]
- }
- },
- "info.aet.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeAudioVideoModel"
- }
- },
- "tasks": [
- "PeAudioVideoModel",
- "PeAudioVideoEncoder"
- ]
- }
- },
- "info.vit.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeVideoEncoder"
- }
- },
- "tasks": [
- "PeVideoEncoder",
- "PeVideoModel"
- ]
- }
- },
- "info.stst.pegasus": {
- "*": {
- "repo": "google/pegasus-large",
- "pkg": {
- "0": {
- "transformers": "PegasusModel"
- }
- },
- "tasks": [
- "PegasusForCausalLM",
- "PegasusForConditionalGeneration",
- "PegasusModel",
- "PegasusPreTrainedModel"
- ]
- }
- },
- "info.stst.pegasus-x": {
- "*": {
- "repo": "google/pegasus-x-large",
- "pkg": {
- "0": {
- "transformers": "PegasusXModel"
- }
- },
- "tasks": [
- "PegasusXForConditionalGeneration",
- "PegasusXModel",
- "PegasusXPreTrainedModel"
- ]
- }
- },
- "info.vit.language-perceiver": {
- "*": {
- "repo": "deepmind/language-perceiver",
- "pkg": {
- "0": {
- "transformers": "PerceiverModel"
- }
- },
- "tasks": [
- "PerceiverForImageClassificationConvProcessing",
- "PerceiverForImageClassificationFourier",
- "PerceiverForImageClassificationLearned",
- "PerceiverForMaskedLM",
- "PerceiverForMultimodalAutoencoding",
- "PerceiverForOpticalFlow",
- "PerceiverForSequenceClassification",
- "PerceiverLayer",
- "PerceiverModel",
- "PerceiverPreTrainedModel"
- ]
- }
- },
- "info.vit.perception-lm": {
- "*": {
- "repo": "facebook/Perception-LM-1B",
- "pkg": {
- "0": {
- "transformers": "PerceptionLMModel"
- }
- },
- "tasks": [
- "PerceptionLMForConditionalGeneration",
- "PerceptionLMPreTrainedModel",
- "PerceptionLMModel"
- ]
- }
- },
- "info.stst.persimmon": {
- "*": {
- "repo": "adept/persimmon-8b-base",
- "pkg": {
- "0": {
- "transformers": "PersimmonModel"
- }
- },
- "tasks": [
- "PersimmonForCausalLM",
- "PersimmonModel",
- "PersimmonPreTrainedModel",
- "PersimmonForSequenceClassification",
- "PersimmonForTokenClassification"
- ]
- }
- },
- "info.stst.phi-1": {
- "*": {
- "repo": "microsoft/phi-1",
- "pkg": {
- "0": {
- "transformers": "PhiModel"
- }
- },
- "tasks": [
- "PhiPreTrainedModel",
- "PhiModel",
- "PhiForCausalLM",
- "PhiForSequenceClassification",
- "PhiForTokenClassification"
- ]
- }
- },
- "info.stst.phi-3": {
- "*": {
- "repo": "microsoft/Phi-3-mini-4k-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi3Model"
- }
- },
- "tasks": [
- "Phi3PreTrainedModel",
- "Phi3Model",
- "Phi3ForCausalLM",
- "Phi3ForSequenceClassification",
- "Phi3ForTokenClassification"
- ]
- }
- },
- "info.vit.phi-4": {
- "*": {
- "repo": "microsoft/Phi-4-multimodal-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi4MultimodalModel"
- }
- },
- "file_256": [
- "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
- ],
- "layer_b3": [
- "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
- ],
- "layer_256": [
- "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
- ],
- "tasks": [
- "Phi4MultimodalAudioPreTrainedModel",
- "Phi4MultimodalAudioModel",
- "Phi4MultimodalVisionPreTrainedModel",
- "Phi4MultimodalVisionModel",
- "Phi4MultimodalPreTrainedModel",
- "Phi4MultimodalModel",
- "Phi4MultimodalForCausalLM"
- ]
- }
- },
- "info.moe.phi-3-moe": {
- "*": {
- "repo": "microsoft/Phi-3.5-MoE-instruct",
- "pkg": {
- "0": {
- "transformers": "PhimoeModel"
- }
- },
- "tasks": [
- "PhimoePreTrainedModel",
- "PhimoeModel",
- "PhimoeForCausalLM",
- "PhimoeForSequenceClassification"
- ]
- }
- },
- "info.vit.pixio-huge": {
- "*": {
- "repo": "facebook/pixio-huge",
- "pkg": {
- "0": {
- "transformers": "PixioModel"
- }
- },
- "tasks": [
- "PixioModel",
- "PixioPreTrainedModel",
- "PixioBackbone"
- ]
- }
- },
- "info.vit.pixtral": {
- "*": {
- "repo": "mistralai/Pixtral-12B-Base-2409",
- "pkg": {
- "0": {
- "transformers": "PixtralVisionModel"
- }
- },
- "tasks": [
- "PixtralVisionModel",
- "PixtralPreTrainedModel"
- ]
- }
- },
- "info.stst.plbart": {
- "*": {
- "repo": "uclanlp/plbart-base",
- "pkg": {
- "0": {
- "transformers": "PLBartModel"
- }
- },
- "tasks": [
- "PLBartForCausalLM",
- "PLBartForConditionalGeneration",
- "PLBartForSequenceClassification",
- "PLBartModel",
- "PLBartPreTrainedModel"
- ]
- }
- },
- "info.vit.poolformer-s12": {
- "*": {
- "repo": "sail/poolformer_s12",
- "pkg": {
- "0": {
- "transformers": "PoolFormerModel"
- }
- },
- "tasks": [
- "PoolFormerForImageClassification",
- "PoolFormerModel",
- "PoolFormerPreTrainedModel"
- ]
- }
- },
- "info.stst.phetnet-uncased": {
- "*": {
- "repo": "microsoft/prophetnet-large-uncased",
- "pkg": {
- "0": {
- "transformers": "ProphetNetModel"
- }
- },
- "tasks": [
- "ProphetNetDecoder",
- "ProphetNetEncoder",
- "ProphetNetForCausalLM",
- "ProphetNetForConditionalGeneration",
- "ProphetNetModel",
- "ProphetNetPreTrainedModel"
- ]
- }
- },
- "info.vit.pvt-224": {
- "*": {
- "repo": "Xrenya/pvt-tiny-224",
- "pkg": {
- "0": {
- "transformers": "PvtModel"
- }
- },
- "tasks": [
- "PvtForImageClassification",
- "PvtModel",
- "PvtPreTrainedModel"
- ]
- }
- },
- "info.vit.pvt-v2-b0": {
- "*": {
- "repo": "OpenGVLab/pvt_v2_b0",
- "pkg": {
- "0": {
- "transformers": "PvtV2Model"
- }
- },
- "tasks": [
- "PvtV2ForImageClassification",
- "PvtV2Model",
- "PvtV2PreTrainedModel",
- "PvtV2Backbone"
- ]
- }
- },
- "info.stst.qwen2": {
- "*": {
- "repo": "Qwen/Qwen2-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2Model"
- }
- },
- "tasks": [
- "Qwen2PreTrainedModel",
- "Qwen2Model",
- "Qwen2ForCausalLM",
- "Qwen2RMSNorm",
- "Qwen2ForSequenceClassification",
- "Qwen2ForTokenClassification",
- "Qwen2ForQuestionAnswering"
- ]
- }
- },
- "info.vit.qwen2-vl": {
- "*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen2_5_VLModel"
- }
- },
- "tasks": [
- "Qwen2_5_VLForConditionalGeneration",
- "Qwen2_5_VLModel",
- "Qwen2_5_VLPreTrainedModel",
- "Qwen2_5_VLTextModel"
- ]
- }
- },
- "info.stst.qwen2-vl": {
- "*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen2_5_VLTextModel"
- }
- },
- "file_256": [
- "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
- "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
- "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
- "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
- ],
- "layer_b3": [
- "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
- "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
- "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
- "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
- ],
- "layer_256": [
- "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
- "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
- "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
- "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
- ],
- "tasks": [
- "Qwen2_5_VLForConditionalGeneration",
- "Qwen2_5_VLModel",
- "Qwen2_5_VLPreTrainedModel",
- "Qwen2_5_VLTextModel"
- ]
- }
- },
- "info.aet.qwen2-audio": {
- "*": {
- "repo": "Qwen/Qwen2-Audio-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2AudioEncoder"
- }
- },
- "tasks": [
- "Qwen2AudioForConditionalGeneration",
- "Qwen2AudioPreTrainedModel",
- "Qwen2AudioEncoder"
- ]
- }
- },
- "info.moe.qwen15-moe-a": {
- "*": {
- "repo": "Qwen/Qwen1.5-MoE-A2.7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2MoeModel"
- }
- },
- "tasks": [
- "Qwen2MoeForCausalLM",
- "Qwen2MoeForQuestionAnswering",
- "Qwen2MoeModel",
- "Qwen2MoePreTrainedModel",
- "Qwen2MoeForSequenceClassification",
- "Qwen2MoeForTokenClassification"
- ]
- }
- },
- "info.stst.qwen3": {
- "*": {
- "repo": "Qwen/Qwen3-8B",
- "pkg": {
- "0": {
- "transformers": "Qwen3Model"
- }
- },
- "tasks": [
- "Qwen3ForCausalLM",
- "Qwen3ForQuestionAnswering",
- "Qwen3PreTrainedModel",
- "Qwen3Model",
- "Qwen3ForSequenceClassification",
- "Qwen3ForTokenClassification"
- ]
- }
- },
- "info.moe.qwen3-a": {
- "*": {
- "repo": "Qwen/Qwen3-30B-A3B",
- "pkg": {
- "0": {
- "transformers": "Qwen3MoeModel"
- }
- },
- "file_256": [
- "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
- ],
- "layer_b3": [
- "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
- ],
- "layer_256": [
- "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
- ],
- "tasks": [
- "Qwen3MoeForCausalLM",
- "Qwen3MoeForQuestionAnswering",
- "Qwen3MoeModel",
- "Qwen3MoePreTrainedModel",
- "Qwen3MoeForSequenceClassification",
- "Qwen3MoeForTokenClassification"
- ]
- }
- },
- "info.moe.qwen3-next-a": {
- "*": {
- "repo": "Qwen/Qwen3-Next-80B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3NextModel"
- }
- },
- "tasks": [
- "Qwen3NextForCausalLM",
- "Qwen3NextForQuestionAnswering",
- "Qwen3NextModel",
- "Qwen3NextPreTrainedModel",
- "Qwen3NextForSequenceClassification",
- "Qwen3NextForTokenClassification"
- ]
- }
- },
- "info.vit.qwen3-vl": {
- "*": {
- "repo": "Qwen/Qwen3-VL-4B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLModel"
- }
- },
- "tasks": [
- "Qwen3VLVisionModel",
- "Qwen3VLForConditionalGeneration",
- "Qwen3VLModel",
- "Qwen3VLPreTrainedModel",
- "Qwen3VLTextModel"
- ]
- }
- },
- "info.vit.qwen3-vl-a": {
- "*": {
- "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLMoeModel"
- }
- },
- "tasks": [
- "Qwen3VLMoeVisionModel",
- "Qwen3VLMoeForConditionalGeneration",
- "Qwen3VLMoeModel",
- "Qwen3VLMoePreTrainedModel",
- "Qwen3VLMoeTextModel"
- ]
- }
- },
- "info.moe.qwen3-vl-a": {
- "*": {
- "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLMoeTextModel"
- }
- },
- "tasks": [
- "Qwen3VLMoeVisionModel",
- "Qwen3VLMoeForConditionalGeneration",
- "Qwen3VLMoeModel",
- "Qwen3VLMoePreTrainedModel",
- "Qwen3VLMoeTextModel"
- ]
- }
- },
- "info.stst.qwen3-vl": {
- "*": {
- "repo": "Qwen/Qwen3-VL-4B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLTextModel"
- }
- },
- "tasks": [
- "Qwen3VLVisionModel",
- "Qwen3VLForConditionalGeneration",
- "Qwen3VLModel",
- "Qwen3VLPreTrainedModel",
- "Qwen3VLTextModel"
- ]
- }
- },
- "info.rnn.recurrentgemma": {
- "*": {
- "repo": "google/recurrentgemma-2b",
- "pkg": {
- "0": {
- "transformers": "RecurrentGemmaModel"
- }
- },
- "tasks": [
- "RecurrentGemmaForCausalLM",
- "RecurrentGemmaModel",
- "RecurrentGemmaPreTrainedModel"
- ]
- }
- },
- "info.art.reformer-crime-and-punishment": {
- "*": {
- "repo": "google/reformer-crime-and-punishment",
- "pkg": {
- "0": {
- "transformers": "ReformerModel"
- }
- },
- "tasks": [
- "ReformerAttention",
- "ReformerForMaskedLM",
- "ReformerForQuestionAnswering",
- "ReformerForSequenceClassification",
- "ReformerLayer",
- "ReformerModel",
- "ReformerModelWithLMHead",
- "ReformerPreTrainedModel"
- ]
- }
- },
- "info.vit.regnet-y-040": {
- "*": {
- "repo": "facebook/regnet-y-040",
- "pkg": {
- "0": {
- "transformers": "RegNetModel"
- }
- },
- "tasks": [
- "RegNetForImageClassification",
- "RegNetModel",
- "RegNetPreTrainedModel"
- ]
- }
- },
- "info.art.rembert": {
- "*": {
- "repo": "google/rembert",
- "pkg": {
- "0": {
- "transformers": "RemBertModel"
- }
- },
- "tasks": [
- "RemBertForCausalLM",
- "RemBertForMaskedLM",
- "RemBertForMultipleChoice",
- "RemBertForQuestionAnswering",
- "RemBertForSequenceClassification",
- "RemBertForTokenClassification",
- "RemBertLayer",
- "RemBertModel",
- "RemBertPreTrainedModel"
- ]
- }
- },
- "info.vit.resnet-50": {
- "*": {
- "repo": "microsoft/resnet-50",
- "pkg": {
- "0": {
- "transformers": "ResNetModel"
- }
- },
- "tasks": [
- "ResNetForImageClassification",
- "ResNetModel",
- "ResNetPreTrainedModel",
- "ResNetBackbone"
- ]
- }
- },
- "info.art.roberta": {
- "*": {
- "repo": "FacebookAI/roberta-base",
- "pkg": {
- "0": {
- "transformers": "RobertaModel"
- }
- },
- "tasks": [
- "RobertaForCausalLM",
- "RobertaForMaskedLM",
- "RobertaForMultipleChoice",
- "RobertaForQuestionAnswering",
- "RobertaForSequenceClassification",
- "RobertaForTokenClassification",
- "RobertaModel",
- "RobertaPreTrainedModel"
- ]
- }
- },
- "info.art.efficient-mlm-m0-0": {
- "*": {
- "repo": "andreasmadsen/efficient_mlm_m0.40",
- "pkg": {
- "0": {
- "transformers": "RobertaPreLayerNormModel"
- }
- },
- "tasks": [
- "RobertaPreLayerNormForCausalLM",
- "RobertaPreLayerNormForMaskedLM",
- "RobertaPreLayerNormForMultipleChoice",
- "RobertaPreLayerNormForQuestionAnswering",
- "RobertaPreLayerNormForSequenceClassification",
- "RobertaPreLayerNormForTokenClassification",
- "RobertaPreLayerNormModel",
- "RobertaPreLayerNormPreTrainedModel"
- ]
- }
- },
- "info.art.roc-bert-zh": {
- "*": {
- "repo": "weiweishi/roc-bert-base-zh",
- "pkg": {
- "0": {
- "transformers": "RoCBertModel"
- }
- },
- "tasks": [
- "RoCBertForCausalLM",
- "RoCBertForMaskedLM",
- "RoCBertForMultipleChoice",
- "RoCBertForPreTraining",
- "RoCBertForQuestionAnswering",
- "RoCBertForSequenceClassification",
- "RoCBertForTokenClassification",
- "RoCBertLayer",
- "RoCBertModel",
- "RoCBertPreTrainedModel"
- ]
- }
- },
- "info.art.roformer-chinese": {
- "*": {
- "repo": "junnyu/roformer_chinese_base",
- "pkg": {
- "0": {
- "transformers": "RoFormerModel"
- }
- },
- "tasks": [
- "RoFormerForCausalLM",
- "RoFormerForMaskedLM",
- "RoFormerForMultipleChoice",
- "RoFormerForQuestionAnswering",
- "RoFormerForSequenceClassification",
- "RoFormerForTokenClassification",
- "RoFormerLayer",
- "RoFormerModel",
- "RoFormerPreTrainedModel"
- ]
- }
- },
- "info.detr.rtdetr-r50vd": {
- "*": {
- "repo": "PekingU/rtdetr_r50vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrModel"
- }
- },
- "tasks": [
- "RTDetrForObjectDetection",
- "RTDetrModel",
- "RTDetrPreTrainedModel"
- ]
- }
- },
- "info.detr.rtdetr-r18vd": {
- "*": {
- "repo": "PekingU/rtdetr_r18vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrV2Model"
- }
- },
- "tasks": [
- "RTDetrV2Model",
- "RTDetrV2PreTrainedModel",
- "RTDetrV2ForObjectDetection"
- ]
- }
- },
- "info.rnn.rwkv-4-pile": {
- "*": {
- "repo": "RWKV/rwkv-4-169m-pile",
- "pkg": {
- "0": {
- "transformers": "RwkvModel"
- }
- },
- "tasks": [
- "RwkvForCausalLM",
- "RwkvModel",
- "RwkvPreTrainedModel"
- ]
- }
- },
- "info.vit.sam-vit-huge": {
- "*": {
- "repo": "facebook/sam-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamModel"
- }
- },
- "tasks": [
- "SamVisionModel",
- "SamModel",
- "SamPreTrainedModel"
- ]
- }
- },
- "info.vit.sam2-hiera": {
- "*": {
- "repo": "facebook/sam2.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "Sam2Model"
- }
- },
- "tasks": [
- "Sam2Model",
- "Sam2VisionModel",
- "Sam2PreTrainedModel",
- "Sam2HieraDetModel"
- ]
- }
- },
- "info.vit.sam3": {
- "*": {
- "repo": "facebook/sam3",
- "pkg": {
- "0": {
- "transformers": "Sam3Model"
- }
- },
- "tasks": [
- "Sam3Model",
- "Sam3VisionModel",
- "Sam3ViTModel",
- "Sam3PreTrainedModel"
- ]
- }
- },
- "info.vit.sam3-tracker1-hiera": {
- "*": {
- "repo": "facebook/sam3_tracker.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "Sam3TrackerModel"
- }
- },
- "tasks": [
- "Sam3TrackerModel",
- "Sam3TrackerPreTrainedModel"
- ]
- }
- },
- "info.stst.sam3": {
- "*": {
- "repo": "facebook/sam3",
- "pkg": {
- "0": {
- "transformers": "Sam3VideoModel"
- }
- },
- "tasks": [
- "Sam3VideoModel",
- "Sam3VideoPreTrainedModel",
- "Sam3VideoInferenceSession",
- "Sam3VideoSegmentationOutput"
- ]
- }
- },
- "info.vit.sam-hq-vit-h": {
- "*": {
- "repo": "sushmanth/sam_hq_vit_h",
- "pkg": {
- "0": {
- "transformers": "SamHQModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.vit.sam-hq-vit-huge": {
- "*": {
- "repo": "syscv-community/sam-hq-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamHQVisionModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.aet.hf-seamless-m4t": {
- "*": {
- "repo": "facebook/hf-seamless-m4t-medium",
- "pkg": {
- "0": {
- "transformers": "SeamlessM4TModel"
- }
- },
- "tasks": [
- "SeamlessM4TForTextToSpeech",
- "SeamlessM4TForSpeechToSpeech",
- "SeamlessM4TForTextToText",
- "SeamlessM4TForSpeechToText",
- "SeamlessM4TModel",
- "SeamlessM4TPreTrainedModel",
- "SeamlessM4TCodeHifiGan",
- "SeamlessM4THifiGan",
- "SeamlessM4TTextToUnitForConditionalGeneration",
- "SeamlessM4TTextToUnitModel"
- ]
- }
- },
- "info.stst.seamless-m4t-v2": {
- "*": {
- "repo": "facebook/seamless-m4t-v2-large",
- "pkg": {
- "0": {
- "transformers": "SeamlessM4Tv2Model"
- }
- },
- "tasks": [
- "SeamlessM4Tv2ForTextToSpeech",
- "SeamlessM4Tv2ForSpeechToSpeech",
- "SeamlessM4Tv2ForTextToText",
- "SeamlessM4Tv2ForSpeechToText",
- "SeamlessM4Tv2Model",
- "SeamlessM4Tv2PreTrainedModel"
- ]
- }
- },
- "info.stst.seedoss": {
- "*": {
- "repo": "ByteDance-Seed/SeedOss-36B",
- "pkg": {
- "0": {
- "transformers": "SeedOssModel"
- }
- },
- "tasks": [
- "SeedOssForCausalLM",
- "SeedOssForQuestionAnswering",
- "SeedOssPreTrainedModel",
- "SeedOssModel",
- "SeedOssForSequenceClassification",
- "SeedOssForTokenClassification"
- ]
- }
- },
- "info.vit.segformer-b0-finetuned-ade-512-512": {
- "*": {
- "repo": "nvidia/segformer-b0-finetuned-ade-512-512",
- "pkg": {
- "0": {
- "transformers": "SegformerModel"
- }
- },
- "tasks": [
- "SegformerDecodeHead",
- "SegformerForImageClassification",
- "SegformerForSemanticSegmentation",
- "SegformerLayer",
- "SegformerModel",
- "SegformerPreTrainedModel"
- ]
- }
- },
- "info.vit.seggpt-vit": {
- "*": {
- "repo": "BAAI/seggpt-vit-large",
- "pkg": {
- "0": {
- "transformers": "SegGptModel"
- }
- },
- "tasks": [
- "SegGptModel",
- "SegGptPreTrainedModel",
- "SegGptForImageSegmentation"
- ]
- }
- },
- "info.aet.sew": {
- "*": {
- "repo": "asapp/sew-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWModel"
- }
- },
- "tasks": [
- "SEWForCTC",
- "SEWForSequenceClassification",
- "SEWModel",
- "SEWPreTrainedModel"
- ]
- }
- },
- "info.aet.sew-d": {
- "*": {
- "repo": "asapp/sew-d-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWDModel"
- }
- },
- "tasks": [
- "SEWDForCTC",
- "SEWDForSequenceClassification",
- "SEWDModel",
- "SEWDPreTrainedModel"
- ]
- }
- },
- "info.vit.siglip2-patch16-224": {
- "*": {
- "repo": "google/siglip2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Siglip2Model"
- }
- },
- "tasks": [
- "Siglip2Model",
- "Siglip2PreTrainedModel",
- "Siglip2TextModel",
- "Siglip2VisionModel",
- "Siglip2ForImageClassification"
- ]
- }
- },
- "info.vit.siglip2-patch16-naflex": {
- "*": {
- "repo": "google/siglip2-base-patch16-naflex",
- "pkg": {
- "0": {
- "transformers": "Siglip2VisionModel"
- }
- },
- "tasks": [
- "Siglip2Model",
- "Siglip2PreTrainedModel",
- "Siglip2TextModel",
- "Siglip2VisionModel",
- "Siglip2ForImageClassification"
- ]
- }
- },
- "info.stst.smollm3": {
- "*": {
- "repo": "HuggingFaceTB/SmolLM3-3B",
- "pkg": {
- "0": {
- "transformers": "SmolLM3Model"
- }
- },
- "tasks": [
- "SmolLM3PreTrainedModel",
- "SmolLM3Model",
- "SmolLM3ForCausalLM",
- "SmolLM3ForSequenceClassification",
- "SmolLM3ForTokenClassification",
- "SmolLM3ForQuestionAnswering"
- ]
- }
- },
- "info.vit.smolvlm": {
- "*": {
- "repo": "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
- "pkg": {
- "0": {
- "transformers": "SmolVLMModel"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.vit.siglip-so-patch14-384": {
- "*": {
- "repo": "google/siglip-so400m-patch14-384",
- "pkg": {
- "0": {
- "transformers": "SmolVLMVisionTransformer"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.aet.s2t-librispeech-asr": {
- "*": {
- "repo": "facebook/s2t-small-librispeech-asr",
- "pkg": {
- "0": {
- "transformers": "Speech2TextModel"
- }
- },
- "tasks": [
- "Speech2TextForConditionalGeneration",
- "Speech2TextModel",
- "Speech2TextPreTrainedModel"
- ]
- }
- },
- "info.stst.speecht5-asr": {
- "*": {
- "repo": "microsoft/speecht5_asr",
- "pkg": {
- "0": {
- "transformers": "SpeechT5Model"
- }
- },
- "tasks": [
- "SpeechT5ForSpeechToText",
- "SpeechT5ForSpeechToSpeech",
- "SpeechT5ForTextToSpeech",
- "SpeechT5Model",
- "SpeechT5PreTrainedModel",
- "SpeechT5HifiGan"
- ]
- }
- },
- "info.art.splinter": {
- "*": {
- "repo": "tau/splinter-base",
- "pkg": {
- "0": {
- "transformers": "SplinterModel"
- }
- },
- "tasks": [
- "SplinterForQuestionAnswering",
- "SplinterForPreTraining",
- "SplinterLayer",
- "SplinterModel",
- "SplinterPreTrainedModel"
- ]
- }
- },
- "info.art.squeezebert-uncased": {
- "*": {
- "repo": "squeezebert/squeezebert-uncased",
- "pkg": {
- "0": {
- "transformers": "SqueezeBertModel"
- }
- },
- "tasks": [
- "SqueezeBertForMaskedLM",
- "SqueezeBertForMultipleChoice",
- "SqueezeBertForQuestionAnswering",
- "SqueezeBertForSequenceClassification",
- "SqueezeBertForTokenClassification",
- "SqueezeBertModel",
- "SqueezeBertModule",
- "SqueezeBertPreTrainedModel"
- ]
- }
- },
- "info.stst.stablelm-4e1t": {
- "*": {
- "repo": "stabilityai/stablelm-3b-4e1t",
- "pkg": {
- "0": {
- "transformers": "StableLmModel"
- }
- },
- "tasks": [
- "StableLmForCausalLM",
- "StableLmModel",
- "StableLmPreTrainedModel",
- "StableLmForSequenceClassification",
- "StableLmForTokenClassification"
- ]
- }
- },
- "info.stst.starcoder2": {
- "*": {
- "repo": "bigcode/starcoder2-7b",
- "pkg": {
- "0": {
- "transformers": "Starcoder2Model"
- }
- },
- "tasks": [
- "Starcoder2ForCausalLM",
- "Starcoder2Model",
- "Starcoder2PreTrainedModel",
- "Starcoder2ForSequenceClassification",
- "Starcoder2ForTokenClassification"
- ]
- }
- },
- "info.vit.swiftformer-xs": {
- "*": {
- "repo": "MBZUAI/swiftformer-xs",
- "pkg": {
- "0": {
- "transformers": "SwiftFormerModel"
- }
- },
- "tasks": [
- "SwiftFormerForImageClassification",
- "SwiftFormerModel",
- "SwiftFormerPreTrainedModel"
- ]
- }
- },
- "info.vit.swin2sr-classicalsr-x2-64": {
- "*": {
- "repo": "caidas/swin2sr-classicalsr-x2-64",
- "pkg": {
- "0": {
- "transformers": "Swin2SRModel"
- }
- },
- "tasks": [
- "Swin2SRForImageSuperResolution",
- "Swin2SRModel",
- "Swin2SRPreTrainedModel"
- ]
- }
- },
- "info.vit.swinv2-patch4-window8-256": {
- "*": {
- "repo": "microsoft/swinv2-tiny-patch4-window8-256",
- "pkg": {
- "0": {
- "transformers": "Swinv2Model"
- }
- },
- "tasks": [
- "Swinv2ForImageClassification",
- "Swinv2ForMaskedImageModeling",
- "Swinv2Model",
- "Swinv2PreTrainedModel",
- "Swinv2Backbone"
- ]
- }
- },
- "info.moe.switch-8": {
- "*": {
- "repo": "google/switch-base-8",
- "pkg": {
- "0": {
- "transformers": "SwitchTransformersModel"
- }
- },
- "tasks": [
- "SwitchTransformersEncoderModel",
- "SwitchTransformersForConditionalGeneration",
- "SwitchTransformersModel",
- "SwitchTransformersPreTrainedModel",
- "SwitchTransformersTop1Router",
- "SwitchTransformersSparseMLP"
- ]
- }
- },
- "info.stst.t5": {
- "*": {
- "repo": "google-t5/t5-small",
- "pkg": {
- "0": {
- "transformers": "T5Model"
- }
- },
- "identifiers": [
- [
- 4096
- ],
- "encoder.embed_tokens.weight",
- "text_encoders.t5xxl.transformer.shared.weight",
- "t5xxl",
- "encoder.block.0.layer.1.DenseReluDense.wi.weight"
- ],
- "file_256": [
- "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
- "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
- "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
- "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
- "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
- "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
- "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
- "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
- "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
- "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
- "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
- "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
- "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
- "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
- "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
- "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
- "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
- "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
- "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
- "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
- "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
- "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
- "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
- "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
- "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
- "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
- "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
- ],
- "layer_b3": [
- "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
- "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
- "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
- "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
- "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
- "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
- "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
- "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
- "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
- "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
- "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
- "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
- "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
- "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
- "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
- "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
- "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
- "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
- "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
- "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
- "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
- "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
- "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
- "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
- "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
- "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
- "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
- "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
- "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
- "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
- ],
- "layer_256": [
- "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
- "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
- "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
- "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
- "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
- "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
- "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
- "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
- "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
- "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
- "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
- "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
- "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
- "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
- "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
- "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
- "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
- "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
- "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
- "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
- "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
- "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
- "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
- "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
- "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
- "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
- "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
- "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
- ],
- "tasks": [
- "T5EncoderModel",
- "T5ForConditionalGeneration",
- "T5Model",
- "T5PreTrainedModel",
- "T5ForQuestionAnswering",
- "T5ForSequenceClassification",
- "T5ForTokenClassification"
- ]
- }
- },
- "info.stst.t5gemma-prefixlm": {
- "*": {
- "repo": "google/t5gemma-2b-2b-prefixlm-it",
- "pkg": {
- "0": {
- "transformers": "T5GemmaModel"
- }
- },
- "tasks": [
- "T5GemmaForConditionalGeneration",
- "T5GemmaModel",
- "T5GemmaEncoderModel",
- "T5GemmaPreTrainedModel",
- "T5GemmaForSequenceClassification",
- "T5GemmaForTokenClassification"
- ]
- }
- },
- "info.stst.t5gemma-2": {
- "*": {
- "repo": "google/t5gemma-2-270m-270m",
- "pkg": {
- "0": {
- "transformers": "T5Gemma2Model"
- }
- },
- "tasks": [
- "T5Gemma2ForConditionalGeneration",
- "T5Gemma2Model",
- "T5Gemma2PreTrainedModel",
- "T5Gemma2ForSequenceClassification",
- "T5Gemma2ForTokenClassification"
- ]
- }
- },
- "info.detr.table-transformer-detection": {
- "*": {
- "repo": "microsoft/table-transformer-detection",
- "pkg": {
- "0": {
- "transformers": "TableTransformerModel"
- }
- },
- "tasks": [
- "TableTransformerForObjectDetection",
- "TableTransformerModel",
- "TableTransformerPreTrainedModel"
- ]
- }
- },
- "info.art.tapas-finetuned-sqa": {
- "*": {
- "repo": "google/tapas-base-finetuned-sqa",
- "pkg": {
- "0": {
- "transformers": "TapasModel"
- }
- },
- "tasks": [
- "TapasForMaskedLM",
- "TapasForQuestionAnswering",
- "TapasForSequenceClassification",
- "TapasModel",
- "TapasPreTrainedModel"
- ]
- }
- },
- "info.vit.textnet": {
- "*": {
- "repo": "czczup/textnet-base",
- "pkg": {
- "0": {
- "transformers": "TextNetModel"
- }
- },
- "tasks": [
- "TextNetBackbone",
- "TextNetModel",
- "TextNetPreTrainedModel",
- "TextNetForImageClassification"
- ]
- }
- },
- "info.stst.time-series-transformer-tourism-monthly": {
- "*": {
- "repo": "huggingface/time-series-transformer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "TimeSeriesTransformerModel"
- }
- },
- "tasks": [
- "TimeSeriesTransformerForPrediction",
- "TimeSeriesTransformerModel",
- "TimeSeriesTransformerPreTrainedModel"
- ]
- }
- },
- "info.art.timesfm-2-pytorch": {
- "*": {
- "repo": "google/timesfm-2.0-500m-pytorch",
- "pkg": {
- "0": {
- "transformers": "TimesFmModel"
- }
- },
- "tasks": [
- "TimesFmModelForPrediction",
- "TimesFmPreTrainedModel",
- "TimesFmModel"
- ]
- }
- },
- "info.vit.timesformer-finetuned-k600": {
- "*": {
- "repo": "facebook/timesformer-base-finetuned-k600",
- "pkg": {
- "0": {
- "transformers": "TimesformerModel"
- }
- },
- "tasks": [
- "TimesformerModel",
- "TimesformerForVideoClassification",
- "TimesformerPreTrainedModel"
- ]
- }
- },
- "info.detr.resnet18-a1-in": {
- "*": {
- "repo": "timm/resnet18.a1_in1k",
- "pkg": {
- "0": {
- "transformers": "TimmWrapperModel"
- }
- },
- "tasks": [
- "TimmWrapperPreTrainedModel",
- "TimmWrapperModel",
- "TimmWrapperForImageClassification"
- ]
- }
- },
- "info.detr.tvp": {
- "*": {
- "repo": "Intel/tvp-base",
- "pkg": {
- "0": {
- "transformers": "TvpModel"
- }
- },
- "tasks": [
- "TvpModel",
- "TvpPreTrainedModel",
- "TvpForVideoGrounding"
- ]
- }
- },
- "info.vit.udop": {
- "*": {
- "repo": "microsoft/udop-large",
- "pkg": {
- "0": {
- "transformers": "UdopModel"
- }
- },
- "tasks": [
- "UdopForConditionalGeneration",
- "UdopPreTrainedModel",
- "UdopModel",
- "UdopEncoderModel"
- ]
- }
- },
- "info.stst.umt5": {
- "*": {
- "repo": "google/umt5-small",
- "pkg": {
- "0": {
- "transformers": "UMT5Model"
- }
- },
- "identifiers": [
- "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
- ],
- "file_256": [
- "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
- "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
- "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
- "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
- "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
- "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
- "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
- "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
- "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
- "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
- "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
- "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
- "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
- "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
- "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
- "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
- ],
- "layer_b3": [
- "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
- "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
- "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
- "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
- "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
- "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
- "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
- "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
- "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
- "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
- "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
- "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
- "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
- "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
- "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
- "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
- "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
- "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
- ],
- "layer_256": [
- "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
- "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
- "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
- "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
- "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
- "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
- "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
- "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
- "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
- "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
- "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
- "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
- "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
- "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
- "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
- "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
- "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
- "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
- ],
- "tasks": [
- "UMT5EncoderModel",
- "UMT5ForConditionalGeneration",
- "UMT5ForQuestionAnswering",
- "UMT5ForSequenceClassification",
- "UMT5ForTokenClassification",
- "UMT5Model",
- "UMT5PreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-1500h-cv": {
- "*": {
- "repo": "microsoft/unispeech-large-1500h-cv",
- "pkg": {
- "0": {
- "transformers": "UniSpeechModel"
- }
- },
- "tasks": [
- "UniSpeechForCTC",
- "UniSpeechForPreTraining",
- "UniSpeechForSequenceClassification",
- "UniSpeechModel",
- "UniSpeechPreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-sat-100h-libri-ft": {
- "*": {
- "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
- "pkg": {
- "0": {
- "transformers": "UniSpeechSatModel"
- }
- },
- "tasks": [
- "UniSpeechSatForAudioFrameClassification",
- "UniSpeechSatForCTC",
- "UniSpeechSatForPreTraining",
- "UniSpeechSatForSequenceClassification",
- "UniSpeechSatForXVector",
- "UniSpeechSatModel",
- "UniSpeechSatPreTrainedModel"
- ]
- }
- },
- "info.gan.univnet-dev": {
- "*": {
- "repo": "dg845/univnet-dev",
- "pkg": {
- "0": {
- "transformers": "UnivNetModel"
- }
- },
- "tasks": [
- "UnivNetModel"
- ]
- }
- },
- "info.stst.vaultgemma": {
- "*": {
- "repo": "google/vaultgemma-7b",
- "pkg": {
- "0": {
- "transformers": "VaultGemmaModel"
- }
- },
- "tasks": [
- "VaultGemmaForCausalLM",
- "VaultGemmaModel",
- "VaultGemmaPreTrainedModel"
- ]
- }
- },
- "info.vit.videollama3-image-hf": {
- "*": {
- "repo": "lkhl/VideoLLaMA3-2B-Image-HF",
- "pkg": {
- "0": {
- "transformers": "VideoLlama3Model"
- }
- },
- "tasks": [
- "VideoLlama3VisionModel",
- "VideoLlama3PreTrainedModel",
- "VideoLlama3Model",
- "VideoLlama3ForConditionalGeneration"
- ]
- }
- },
- "info.vit.video-llava-hf": {
- "*": {
- "repo": "LanguageBind/Video-LLaVA-7B-hf",
- "pkg": {
- "0": {
- "transformers": "VideoLlavaModel"
- }
- },
- "tasks": [
- "VideoLlavaPreTrainedModel",
- "VideoLlavaModel",
- "VideoLlavaForConditionalGeneration"
- ]
- }
- },
- "info.vit.videomae": {
- "*": {
- "repo": "MCG-NJU/videomae-base",
- "pkg": {
- "0": {
- "transformers": "VideoMAEModel"
- }
- },
- "tasks": [
- "VideoMAEForPreTraining",
- "VideoMAEModel",
- "VideoMAEPreTrainedModel",
- "VideoMAEForVideoClassification"
- ]
- }
- },
- "info.vit.vilt-b32-mlm": {
- "*": {
- "repo": "dandelin/vilt-b32-mlm",
- "pkg": {
- "0": {
- "transformers": "ViltModel"
- }
- },
- "tasks": [
- "ViltForImageAndTextRetrieval",
- "ViltForImagesAndTextClassification",
- "ViltForTokenClassification",
- "ViltForMaskedLM",
- "ViltForQuestionAnswering",
- "ViltLayer",
- "ViltModel",
- "ViltPreTrainedModel"
- ]
- }
- },
- "info.vit.vip-llava-hf": {
- "*": {
- "repo": "ybelkada/vip-llava-7b-hf",
- "pkg": {
- "0": {
- "transformers": "VipLlavaModel"
- }
- },
- "tasks": [
- "VipLlavaModel",
- "VipLlavaForConditionalGeneration",
- "VipLlavaPreTrainedModel"
- ]
- }
- },
- "info.vit.japanese-clip-vit-h-14-bert-wider": {
- "*": {
- "repo": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider",
- "pkg": {
- "0": {
- "transformers": "VisionTextDualEncoderModel"
- }
- },
- "tasks": [
- "VisionTextDualEncoderModel"
- ]
- }
- },
- "info.art.visualbert-vqa-coco-pre": {
- "*": {
- "repo": "uclanlp/visualbert-vqa-coco-pre",
- "pkg": {
- "0": {
- "transformers": "VisualBertModel"
- }
- },
- "tasks": [
- "VisualBertForMultipleChoice",
- "VisualBertForPreTraining",
- "VisualBertForQuestionAnswering",
- "VisualBertForRegionToPhraseAlignment",
- "VisualBertForVisualReasoning",
- "VisualBertLayer",
- "VisualBertModel",
- "VisualBertPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-patch16-224": {
- "*": {
- "repo": "google/vit-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "ViTModel"
- }
- },
- "tasks": [
- "ViTForImageClassification",
- "ViTForMaskedImageModeling",
- "ViTModel",
- "ViTPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-mae": {
- "*": {
- "repo": "facebook/vit-mae-base",
- "pkg": {
- "0": {
- "transformers": "ViTMAEModel"
- }
- },
- "tasks": [
- "ViTMAEForPreTraining",
- "ViTMAELayer",
- "ViTMAEModel",
- "ViTMAEPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-msn": {
- "*": {
- "repo": "facebook/vit-msn-base",
- "pkg": {
- "0": {
- "transformers": "ViTMSNModel"
- }
- },
- "tasks": [
- "ViTMSNModel",
- "ViTMSNForImageClassification",
- "ViTMSNPreTrainedModel"
- ]
- }
- },
- "info.vit.vitdet-patch16-224": {
- "*": {
- "repo": "google/vitdet-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "VitDetModel"
- }
- },
- "tasks": [
- "VitDetModel",
- "VitDetPreTrainedModel",
- "VitDetBackbone"
- ]
- }
- },
- "info.art.mms-tts-eng": {
- "*": {
- "repo": "facebook/mms-tts-eng",
- "pkg": {
- "0": {
- "transformers": "VitsModel"
- }
- },
- "tasks": [
- "VitsModel",
- "VitsPreTrainedModel"
- ]
- }
- },
- "info.vit.vivit16x2-kinetics400": {
- "*": {
- "repo": "google/vivit-b-16x2-kinetics400",
- "pkg": {
- "0": {
- "transformers": "VivitModel"
- }
- },
- "tasks": [
- "VivitModel",
- "VivitPreTrainedModel",
- "VivitForVideoClassification"
- ]
- }
- },
- "info.vit.vjepa2-vitl-fpc64-256": {
- "*": {
- "repo": "facebook/vjepa2-vitl-fpc64-256",
- "pkg": {
- "0": {
- "transformers": "VJEPA2Model"
- }
- },
- "tasks": [
- "VJEPA2Model",
- "VJEPA2PreTrainedModel",
- "VJEPA2ForVideoClassification"
- ]
- }
- },
- "info.stst.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralForConditionalGeneration"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.aet.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralEncoder"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.aet.wav2vec2-960h": {
- "*": {
- "repo": "facebook/wav2vec2-base-960h",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2Model"
- }
- },
- "tasks": [
- "Wav2Vec2ForAudioFrameClassification",
- "Wav2Vec2ForCTC",
- "Wav2Vec2ForMaskedLM",
- "Wav2Vec2ForPreTraining",
- "Wav2Vec2ForSequenceClassification",
- "Wav2Vec2ForXVector",
- "Wav2Vec2Model",
- "Wav2Vec2PreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-bert-rel-pos": {
- "*": {
- "repo": "facebook/w2v-bert-2.0",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2BertModel"
- }
- },
- "tasks": [
- "Wav2Vec2BertForAudioFrameClassification",
- "Wav2Vec2BertForCTC",
- "Wav2Vec2BertForSequenceClassification",
- "Wav2Vec2BertForXVector",
- "Wav2Vec2BertModel",
- "Wav2Vec2BertPreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-conformer-rel-pos": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rel-pos-large",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerModel"
- }
- },
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.aet.wavlm": {
- "*": {
- "repo": "microsoft/wavlm-base",
- "pkg": {
- "0": {
- "transformers": "WavLMModel"
- }
- },
- "tasks": [
- "WavLMForAudioFrameClassification",
- "WavLMForCTC",
- "WavLMForSequenceClassification",
- "WavLMForXVector",
- "WavLMModel",
- "WavLMPreTrainedModel"
- ]
- }
- },
- "info.aet.whisper": {
- "*": {
- "repo": "openai/whisper-tiny",
- "pkg": {
- "0": {
- "transformers": "WhisperModel"
- }
- },
- "tasks": [
- "WhisperForCausalLM",
- "WhisperForConditionalGeneration",
- "WhisperModel",
- "WhisperPreTrainedModel",
- "WhisperForAudioClassification"
- ]
- }
- },
- "info.vit.xclip-patch32": {
- "*": {
- "repo": "microsoft/xclip-base-patch32",
- "pkg": {
- "0": {
- "transformers": "XCLIPModel"
- }
- },
- "tasks": [
- "XCLIPModel",
- "XCLIPPreTrainedModel",
- "XCLIPTextModel",
- "XCLIPVisionModel"
- ]
- }
- },
- "info.gan.x-codec": {
- "*": {
- "repo": "Manel/X-Codec",
- "pkg": {
- "0": {
- "transformers": "XcodecModel"
- }
- },
- "tasks": [
- "XcodecModel",
- "XcodecPreTrainedModel"
- ]
- }
- },
- "info.art.xglm": {
- "*": {
- "repo": "facebook/xglm-564M",
- "pkg": {
- "0": {
- "transformers": "XGLMModel"
- }
- },
- "tasks": [
- "XGLMForCausalLM",
- "XGLMModel",
- "XGLMPreTrainedModel"
- ]
- }
- },
- "info.art.xlm-mlm-en-2048": {
- "*": {
- "repo": "FacebookAI/xlm-mlm-en-2048",
- "pkg": {
- "0": {
- "transformers": "XLMModel"
- }
- },
- "tasks": [
- "XLMForMultipleChoice",
- "XLMForQuestionAnswering",
- "XLMForQuestionAnsweringSimple",
- "XLMForSequenceClassification",
- "XLMForTokenClassification",
- "XLMModel",
- "XLMPreTrainedModel",
- "XLMWithLMHeadModel"
- ]
- }
- },
- "info.art.xlm-roberta": {
- "*": {
- "repo": "FacebookAI/xlm-roberta-base",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaModel"
- }
- },
- "tasks": [
- "XLMRobertaForCausalLM",
- "XLMRobertaForMaskedLM",
- "XLMRobertaForMultipleChoice",
- "XLMRobertaForQuestionAnswering",
- "XLMRobertaForSequenceClassification",
- "XLMRobertaForTokenClassification",
- "XLMRobertaModel",
- "XLMRobertaPreTrainedModel"
- ]
- }
- },
- "info.art.xlm-roberta-xl": {
- "*": {
- "repo": "facebook/xlm-roberta-xl",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaXLModel"
- }
- },
- "tasks": [
- "XLMRobertaXLForCausalLM",
- "XLMRobertaXLForMaskedLM",
- "XLMRobertaXLForMultipleChoice",
- "XLMRobertaXLForQuestionAnswering",
- "XLMRobertaXLForSequenceClassification",
- "XLMRobertaXLForTokenClassification",
- "XLMRobertaXLModel",
- "XLMRobertaXLPreTrainedModel"
- ]
- }
- },
- "info.art.xlnet-cased": {
- "*": {
- "repo": "xlnet/xlnet-large-cased",
- "pkg": {
- "0": {
- "transformers": "XLNetModel"
- }
- },
- "tasks": [
- "XLNetForMultipleChoice",
- "XLNetForQuestionAnswering",
- "XLNetForQuestionAnsweringSimple",
- "XLNetForSequenceClassification",
- "XLNetForTokenClassification",
- "XLNetLMHeadModel",
- "XLNetModel",
- "XLNetPreTrainedModel"
- ]
- }
- },
- "info.lstm.xlstm": {
- "*": {
- "repo": "NX-AI/xLSTM-7b",
- "pkg": {
- "0": {
- "transformers": "xLSTMModel"
- }
- },
- "tasks": [
- "xLSTMForCausalLM",
- "xLSTMModel",
- "xLSTMPreTrainedModel"
- ]
- }
- },
- "info.art.xmod": {
- "*": {
- "repo": "facebook/xmod-base",
- "pkg": {
- "0": {
- "transformers": "XmodModel"
- }
- },
- "tasks": [
- "XmodForCausalLM",
- "XmodForMaskedLM",
- "XmodForMultipleChoice",
- "XmodForQuestionAnswering",
- "XmodForSequenceClassification",
- "XmodForTokenClassification",
- "XmodModel",
- "XmodPreTrainedModel"
- ]
- }
- },
- "info.cnn.yolos": {
- "*": {
- "repo": "hustvl/yolos-base",
- "pkg": {
- "0": {
- "transformers": "YolosModel"
- }
- },
- "tasks": [
- "YolosForObjectDetection",
- "YolosModel",
- "YolosPreTrainedModel"
- ]
- }
- },
- "info.art.yoso-4096": {
- "*": {
- "repo": "uw-madison/yoso-4096",
- "pkg": {
- "0": {
- "transformers": "YosoModel"
- }
- },
- "tasks": [
- "YosoForMaskedLM",
- "YosoForMultipleChoice",
- "YosoForQuestionAnswering",
- "YosoForSequenceClassification",
- "YosoForTokenClassification",
- "YosoLayer",
- "YosoModel",
- "YosoPreTrainedModel"
- ]
- }
- },
- "info.ssm.zamba-v1": {
- "*": {
- "repo": "Zyphra/Zamba-7B-v1",
- "pkg": {
- "0": {
- "transformers": "ZambaModel"
- }
- },
- "tasks": [
- "ZambaForCausalLM",
- "ZambaForSequenceClassification",
- "ZambaModel",
- "ZambaPreTrainedModel"
- ]
- }
- },
- "info.ssm.zamba2": {
- "*": {
- "repo": "Zyphra/Zamba2-2.7B",
- "pkg": {
- "0": {
- "transformers": "Zamba2Model"
- }
- },
- "tasks": [
- "Zamba2ForCausalLM",
- "Zamba2ForSequenceClassification",
- "Zamba2Model",
- "Zamba2PreTrainedModel"
- ]
- }
- },
- "ops.precision.uint": {
- "U8": {
- "pkg": {
- "0": {
- "torch": {
- "uint8": {
- "variant": "uint8"
- }
- }
- }
- }
- },
- "U16": {
- "pkg": {
- "0": {
- "torch": {
- "uint16": {
- "variant": "uint16"
- }
- }
- }
- }
- },
- "U32": {
- "pkg": {
- "0": {
- "torch": {
- "uint32": {
- "variant": "uint32"
- }
- }
- }
- }
- },
- "U64": {
- "pkg": {
- "0": {
- "torch": {
- "uint64": {
- "variant": "uint64"
- }
- }
- }
- }
- },
- "U1": {
- "pkg": {
- "0": {
- "torch": {
- "uint1": {
- "variant": "uint1"
- }
- }
- }
- }
- },
- "U2": {
- "pkg": {
- "0": {
- "torch": {
- "uint2": {
- "variant": "uint2"
- }
- }
- }
- }
- },
- "U3": {
- "pkg": {
- "0": {
- "torch": {
- "uint3": {
- "variant": "uint3"
- }
- }
- }
- }
- },
- "U4": {
- "pkg": {
- "0": {
- "torch": {
- "uint4": {
- "variant": "uint4"
- }
- }
- }
- }
- },
- "U5": {
- "pkg": {
- "0": {
- "torch": {
- "uint5": {
- "variant": "uint5"
- }
- }
- }
- }
- },
- "U6": {
- "pkg": {
- "0": {
- "torch": {
- "uint6": {
- "variant": "uint6"
- }
- }
- }
- }
- },
- "U7": {
- "pkg": {
- "0": {
- "torch": {
- "uint7": {
- "variant": "uint7"
- }
- }
- }
- }
- }
- },
- "ops.precision.int": {
- "I8": {
- "pkg": {
- "0": {
- "torch": {
- "int8": {
- "variant": "int8"
- }
- }
- }
- }
- },
- "I16": {
- "pkg": {
- "0": {
- "torch": {
- "int16": {
- "variant": "int16"
- }
- }
- }
- }
- },
- "I32": {
- "pkg": {
- "0": {
- "torch": {
- "int32": {
- "variant": "int32"
- }
- }
- }
- }
- },
- "I64": {
- "pkg": {
- "0": {
- "torch": {
- "int64": {
- "variant": "int64"
- }
- }
- }
- }
- },
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "qint8": {
- "variant": "qint8"
- }
- }
- }
- }
- },
- "Q32": {
- "pkg": {
- "0": {
- "torch": {
- "qint32": {
- "variant": "qint32"
- }
- }
- }
- }
- },
- "I1": {
- "pkg": {
- "0": {
- "torch": {
- "int1": {
- "variant": "int1"
- }
- }
- }
- }
- },
- "I2": {
- "pkg": {
- "0": {
- "torch": {
- "int2": {
- "variant": "int2"
- }
- }
- }
- }
- },
- "I3": {
- "pkg": {
- "0": {
- "torch": {
- "int3": {
- "variant": "int3"
- }
- }
- }
- }
- },
- "I4": {
- "pkg": {
- "0": {
- "torch": {
- "int4": {
- "variant": "int4"
- }
- }
- }
- }
- },
- "I5": {
- "pkg": {
- "0": {
- "torch": {
- "int5": {
- "variant": "int5"
- }
- }
- }
- }
- },
- "I6": {
- "pkg": {
- "0": {
- "torch": {
- "int6": {
- "variant": "int6"
- }
- }
- }
- }
- },
- "I7": {
- "pkg": {
- "0": {
- "torch": {
- "int7": {
- "variant": "int7"
- }
- }
- }
- }
- }
- },
- "ops.precision.float": {
- "F16": {
- "pkg": {
- "0": {
- "torch": {
- "float16": {
- "variant": "fp16"
- }
- }
- }
- }
- },
- "F32": {
- "pkg": {
- "0": {
- "torch": {
- "float32": {
- "variant": "fp32"
- }
- }
- }
- }
- },
- "F64": {
- "pkg": {
- "0": {
- "torch": {
- "float64": {
- "variant": "fp64"
- }
- }
- }
- }
- },
- "F8_E5M2": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2": {
- "variant": "fp8_e5m2"
- }
- }
- }
- }
- },
- "F8_E4M3": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fn": {
- "variant": "fp8_e4m3fn"
- }
- }
- }
- }
- },
- "F8_E5M2FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2fnuz": {
- "variant": "fp8_e5m2fnuz"
- }
- }
- }
- }
- },
- "F8_E4M3FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fnuz": {
- "variant": "fp8_e4m3fnuz"
- }
- }
- }
- }
- },
- "F8_E8M0FNU": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e8m0fnu": {
- "variant": "fp8_e8m0fnu"
- }
- }
- }
- }
- },
- "F8_E2M1": {
- "pkg": {
- "0": {
- "torch": {
- "float4_e2m1fn_x2": {
- "variant": "fp4_e2m1fn_x2"
- }
- }
- }
- }
- }
- },
- "ops.precision.complex": {
- "C32": {
- "pkg": {
- "0": {
- "torch": {
- "complex32": {
- "variant": "complex32"
- }
- }
- }
- }
- },
- "C64": {
- "pkg": {
- "0": {
- "torch": {
- "complex64": {
- "variant": "complex64"
- }
- }
- }
- }
- },
- "C128": {
- "pkg": {
- "0": {
- "torch": {
- "complex128": {
- "variant": "complex128"
- }
- }
- }
- }
- }
- },
- "ops.precision.bool": {
- "Bbool": {
- "pkg": {
- "0": {
- "torch": {
- "bool": {
- "variant": "bool"
- }
- }
- }
- }
- }
- },
- "ops.precision.quint": {
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "quint8": {
- "variant": "quint8"
- }
- }
- }
- }
- },
- "Q4x2": {
- "pkg": {
- "0": {
- "torch": {
- "quint4x2": {
- "variant": "quint4x2"
- }
- }
- }
- }
- },
- "Q2x4": {
- "pkg": {
- "0": {
- "torch": {
- "quint2x4": {
- "variant": "quint2x4"
- }
- }
- }
- }
- }
- },
- "ops.precision.bfloat": {
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bfloat16": {
- "variant": "bf16"
- }
- }
- }
- }
- }
- },
- "ops.precision.bits": {
- "B1x8": {
- "pkg": {
- "0": {
- "torch": {
- "bits1x8": {
- "variant": "bits1x8"
- }
- }
- }
- }
- },
- "B2x4": {
- "pkg": {
- "0": {
- "torch": {
- "bits2x4": {
- "variant": "bits2x4"
- }
- }
- }
- }
- },
- "B4x2": {
- "pkg": {
- "0": {
- "torch": {
- "bits4x2": {
- "variant": "bits4x2"
- }
- }
- }
- }
- },
- "B8": {
- "pkg": {
- "0": {
- "torch": {
- "bits8": {
- "variant": "bits8"
- }
- }
- }
- }
- },
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bits16": {
- "variant": "bits16"
- }
- }
- }
- }
- }
- },
- "ops.scheduler.amused": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "AmusedScheduler",
- "module_path": "diffusers.schedulers.scheduling_amused"
- }
- }
- }
- },
- "ops.scheduler.cmstochasticiterative": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CMStochasticIterativeScheduler",
- "module_path": "diffusers.schedulers.scheduling_consistency_models"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxdpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpm_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.ddiminverse": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_inverse"
- }
- }
- }
- },
- "ops.scheduler.ddimparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim"
- }
- }
- }
- },
- "ops.scheduler.ddpmparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm"
- }
- }
- }
- },
- "ops.scheduler.ddpmwuerstchen": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMWuerstchenScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_wuerstchen"
- }
- }
- }
- },
- "ops.scheduler.deis": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DEISMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_deis_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpminverse": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep_inverse"
- }
- }
- }
- },
- "ops.scheduler.dpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpmsinglestep": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverSinglestepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_singlestep"
- }
- }
- }
- },
- "ops.scheduler.edmdpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "EDMDPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.edmeuler": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "EDMEulerScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_euler"
- }
- }
- }
- },
- "ops.scheduler.eulerancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerAncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.euler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatcheuler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchEulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchheun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchHeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchlcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchLCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_lcm"
- }
- }
- }
- },
- "ops.scheduler.heun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "HeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.ipndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "IPNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ipndm"
- }
- }
- }
- },
- "ops.scheduler.karrasve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "KarrasVeScheduler",
- "module_path": "diffusers.schedulers.deprecated.scheduling_karras_ve"
- }
- }
- }
- },
- "ops.scheduler.kdpm2ancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2AncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.kdpm2": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2DiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_discrete"
- }
- }
- }
- },
- "ops.scheduler.lcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "LCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_lcm"
- }
- }
- }
- },
- "ops.scheduler.pndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "PNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_pndm"
- }
- }
- }
- },
- "ops.scheduler.repaint": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "RePaintScheduler",
- "module_path": "diffusers.schedulers.scheduling_repaint"
- }
- }
- }
- },
- "ops.scheduler.sa": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "SASolverScheduler",
- "module_path": "diffusers.schedulers.scheduling_sasolver"
- }
- }
- }
- },
- "ops.scheduler.scm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "SCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_scm"
- }
- }
- }
- },
- "ops.scheduler.scoresdeve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "ScoreSdeVeScheduler",
- "module_path": "diffusers.schedulers.scheduling_sde_ve"
- }
- }
- }
- },
- "ops.scheduler.tcd": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "TCDScheduler",
- "module_path": "diffusers.schedulers.scheduling_tcd"
- }
- }
- }
- },
- "ops.scheduler.unclip": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "UnCLIPScheduler",
- "module_path": "diffusers.schedulers.scheduling_unclip"
- }
- }
- }
- },
- "ops.scheduler.unipc": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "UniPCMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_unipc_multistep"
- }
- }
- }
- },
- "ops.scheduler.vqdiffusion": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "VQDiffusionScheduler",
- "module_path": "diffusers.schedulers.scheduling_vq_diffusion"
- }
- }
- }
- },
- "ops.scheduler.karrasdiffusion": {
- "schedulers": {
- "pkg": {
- "0": {
- "diffusers": "KarrasDiffusionSchedulers",
- "module_path": "diffusers.schedulers.scheduling_utils"
- }
- }
- }
- },
- "info.lora.dmd": {
- "stable-diffusion-xl-1": {
- "repo": "tianweiy/DMD2",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "timesteps": [
- 999,
- 749,
- 499,
- 249
- ]
- },
- "scheduler": {
- "ops.scheduler.lcm": ""
- }
- }
- },
- "file_256": [
- "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
- "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb"
- ]
- }
- },
- "info.lora.dpo": {
- "stable-diffusion-xl-1": {
- "repo": "radames/sdxl-DPO-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5,
- "num_inference_steps": 4
- },
- "scheduler": {
- "ops.scheduler.dpm": {
- "algorithm_type": "sde-dpmsolver++",
- "use_karras_sigmas": true,
- "order": 2
- }
- }
- }
- },
- "file_256": [
- "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
- "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad"
- ]
- }
- },
- "info.lora.flash": {
- "stable-diffusion-xl-1": {
- "repo": "jasperai/flash-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": "ops.scheduler.lcm"
- }
- },
- "file_256": [
- "afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"
- ]
- },
- "pixart-alpha": {
- "repo": "jasperai/flash-pixart",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"
- ]
- },
- "stable-diffusion-3": {
- "repo": "jasperai/flash-sd3",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "jasperai/flash-sd",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- },
- "file_256": [
- "99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"
- ]
- }
- },
- "info.lora.hyper": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- }
- }
- },
- "file_256": {
- "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {
- "num_inference_steps": 12
- },
- "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {
- "num_inference_steps": 8
- },
- "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "flux1-dev": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {
- "num_inference_steps": 16
- },
- "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {
- "num_inference_steps": 4
- },
- "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {
- "num_inference_steps": 8
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {
- "num_inference_steps": 12
- },
- "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {
- "num_inference_steps": 8
- },
- "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- }
- },
- "info.lora.lcm": {
- "stable-diffusion-xl-1": {
- "repo": "latent-consistency/lcm-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"
- ]
- },
- "ssd": {
- "repo": "latent-consistency/lcm-lora-ssd-1b",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"
- ]
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-VegaRT",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "gen_kwargs": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "latent-consistency/lcm-lora-sdv1-5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"
- ]
- }
- },
- "info.lora.lightning": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/SDXL-Lightning",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- }
- }
- },
- "info.lora.pcm": {
- "stable-diffusion-xl-1": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {
- "num_inference_steps": 16
- },
- "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {
- "num_inference_steps": 16
- },
- "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {
- "num_inference_steps": 4
- },
- "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {
- "num_inference_steps": 4
- },
- "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {
- "num_inference_steps": 8
- },
- "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {
- "num_inference_steps": 8
- },
- "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- },
- "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {
- "num_inference_steps": 16
- },
- "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {
- "num_inference_steps": 4
- },
- "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {
- "num_inference_steps": 8
- },
- "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {
- "num_inference_steps": 2,
- "guidance_scale": 1.2
- },
- "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- }
- }
- }
- },
- "info.lora.slam": {
- "stable-diffusion-xl-1": {
- "repo": "alimama-creative/slam-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 1
- }
- }
- },
- "file_256": [
- "22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "alimama-creative/slam-sd1.5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- }
- }
- },
- "info.lora.spo": {
- "stable-diffusion-xl-1": {
- "repo": "SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5
- }
- }
- },
- "file_256": [
- "1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"
- ]
- }
- },
- "info.lora.tcd": {
- "stable-diffusion-xl-1": {
- "repo": "h1t/TCD-SDXL-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "eta": 0.3
- },
- "scheduler": {
- "ops.scheduler.tcd": {}
- }
- }
- },
- "file_256": [
- "2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "h1t/TCD-SD15-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"
- ],
- "layer_256": [
- "e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"
- ],
- "layer_b3": [
- "90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"
- ]
- }
- },
- "info.lora.turbo": {
- "stable-diffusion-xl-1": {
- "file_256": [
- "a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"
- ]
- },
- "flux1-dev": {
- "repo": "alimama-creative/FLUX.1-Turbo-Alpha",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- },
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 8,
- "max_sequence_length": 512
- }
- }
- },
- "file_256": [
- "77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"
- ]
- },
- "stable-diffusion-3": {
- "repo": "tensorart/stable-diffusion-3.5-large-TurboX",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.flow-match": {
- "shift": 5
- }
- }
- }
- },
- "file_256": {
- "fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {
- "num_inference_steps": "8"
- }
- }
- }
- },
- "info.art.audiogen": {
- "*": {
- "repo": "facebook/audiogen-medium",
- "pkg": {
- "0": {
- "audiocraft": "models.AudioGen",
- "generation": {
- "duration": 5
- },
- "stage_2": {
- "audiocraft": ".data.audioaudio_write",
- "generation": {
- "strategy": "loudness",
- "loudness_compressor": true
- }
- }
- }
- }
- }
- },
- "info.art.parler-tts-v1": {
- "*": {
- "repo": "parler-tts/parler-tts-large-v1",
- "pkg": {
- "0": {
- "parler_tts": "ParlerTTSForConditionalGeneration",
- "generation": {
- "return_tensors": "pt"
- }
- }
- }
- }
- },
- "info.gan.snac-st": {
- "*": {
- "repo": "Zuellni/snac-24khz-ST",
- "pkg": {
- "0": {
- "snac": "SNAC"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio"
- }
- },
- "file_256": [
- "e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be",
- "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"
- ],
- "layer_256": [
- "35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300",
- "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"
- ],
- "layer_b3": [
- "18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46",
- "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"
- ]
- }
- },
- "info.gan.kokoro": {
- "*": {
- "repo": "hexgrad/Kokoro-82M",
- "pkg": {
- "0": {
- "kokoro": "KPipeline"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
- "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
- ],
- "layer_256": [
- "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
- "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b"
- ],
- "layer_b3": [
- "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
- "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d",
- "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e932dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92"
- ]
- }
- },
- "info.stst.silero-vad": {
- "*": {
- "repo": "freddyaboulton/silero-vad",
- "pkg": {
- "0": {
- "onnx": "onnx"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"
- ],
- "layer_256": [
- "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd"
- ],
- "layer_b3": [
- "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
- "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
- "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434",
- "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e"
- ]
- }
- },
- "info.stst.wav2vec2-conformer-rope-960h-ft": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rope-large-960h-ft",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerForCTC"
- }
- },
- "file_256": [
- "97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"
- ],
- "layer_256": [
- "1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"
- ],
- "layer_b3": [
- "6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
- ],
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.art.orpheus-0-ft": {
- "*": {
- "repo": "canopylabs/orpheus-3b-0.1-ft",
- "pkg": {
- "0": {
- "orpheus_tts": "OrpheusModel",
- "generation": {
- "max_model_len": 2048
- }
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.art.outetts-0": {
- "*": {
- "repo": "OuteAI/OuteTTS-0.3-1B",
- "pkg": {
- "0": {
- "outetts": "InterfaceHF"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.gan.speecht5-hifigan": {
- "*": {
- "file_256": [
- "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c"
- ],
- "layer_256": [
- "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770"
- ],
- "layer_b3": [
- "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956"
- ]
- }
- },
- "info.dit.flux1-dev": {
- "mystic": {
- "repo": "enhanceaiteam/Mystic",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 16,
- "guidance_scale": 7.5,
- "width": 768,
- "height": 1024
- }
- }
- },
- "file_256": [
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
- ],
- "layer_256": [
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
- ],
- "layer_b3": [
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
- ]
- },
- "flux1-lite": {
- "repo": "freepik/flux.1-lite-8b",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 28
- }
- }
- },
- "file_256": [
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
- ],
- "layer_256": [
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
- ],
- "layer_b3": [
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
- ]
- },
- "f-lite": {
- "repo": "freepik/f-lite",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "f-lite-texture": {
- "repo": "freepik/f-lite-texture",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "flux": {
- "repo": "TencentARC/flux-mini",
- "file_256": [
- "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
- ],
- "layer_256": [
- "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
- ],
- "layer_b3": [
- "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
- ]
- },
- "flex2": {
- "repo": "ostris/Flex.2-preview",
- "file_256": [
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
- ],
- "layer_256": [
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
- ],
- "layer_b3": [
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
- ]
- },
- "flex1-alpha": {
- "repo": "ostris/Flex.1-alpha",
- "file_256": [
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
- ],
- "layer_256": [
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
- ],
- "layer_b3": [
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
- ]
- },
- "*": {
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 3.5,
- "num_inference_steps": 50,
- "max_sequence_length": 512
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "gudance": 3.5,
- "num_inference_steps": 25
- }
- }
- },
- "file_256": [
- "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
- "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
- "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
- "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
- "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
- ],
- "layer_b3": [
- "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
- "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
- "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
- ],
- "layer_256": [
- "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
- "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
- "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
- ],
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "tasks": [
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- }
- },
- "info.dit.wan2-flf2v-720p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
- "file_256": [
- "",
- ""
- ],
- "layer_256": [
- ""
- ],
- "layer_b3": [
- ""
- ]
- }
- },
- "ops.patch.hidiffusion": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "hidiffusion": {
- "apply_hidiffusion": {
- "timesteps": "StableDiffusionXLTimesteps"
- }
- },
- "generation": {
- "height": 2048,
- "width": 2048,
- "eta": 1.0,
- "guidance_scale": 7.5,
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "ops.scheduler.align-your-steps": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "diffusers": "schedulers.scheduling_utils.AysSchedules",
- "generation": {
- "timesteps": "StableDiffusionXLTimesteps",
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "info.art.chameleon": {
- "lumina-mgpt-1024": {
- "repo": "Alpha-VLLM/Lumina-mGPT-7B-1024",
- "pkg": {
- "0": {
- "inference_solver": {
- "FlexARInferenceSolver": {
- "precision": "bf16",
- "target_size": 768
- }
- },
- "generation": {
- "images": [],
- "qas": [
- [
- "q1",
- null
- ]
- ],
- "max_gen_len": 8192,
- "temperature": 1.0
- }
- },
- "1": {
- "inference_solver": "ChameleonXLLMXForConditionalGeneration"
- }
- },
- "file_256": [
- "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
- "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec",
- "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5",
- "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b"
- ],
- "layer_256": [
- "eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"
- ],
- "layer_b3": [
- "6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"
- ]
- }
- },
- "info.vit.clip-vit-patch14": {
- "*": {
- "repo": "openai/clip-vit-large-patch14",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModel"
- }
- },
- "file_256": [
- "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e",
- "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17",
- "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082",
- "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3",
- "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd",
- "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1",
- "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc",
- "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae",
- "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325",
- "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5",
- "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447",
- "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9",
- "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334",
- "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e",
- "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633",
- "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
- "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1"
- ],
- "layer_256": [
- "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
- "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
- "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
- "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
- "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
- "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
- "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
- "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
- "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
- "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471",
- "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364",
- "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e",
- "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3"
- ],
- "layer_b3": [
- "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
- "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
- "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
- "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
- "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
- "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
- "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636",
- "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f",
- "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3",
- "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5",
- "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc",
- "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc",
- "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f",
- "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
- "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
- "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-g-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModelWithProjection"
- }
- },
- "file_256": [
- "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134",
- "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4",
- "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4",
- "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe",
- "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d",
- "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5",
- "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c",
- "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a",
- "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff",
- "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b"
- ],
- "layer_256": [
- "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
- "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
- "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc",
- "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3",
- "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946",
- "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a",
- "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46"
- ],
- "layer_b3": [
- "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
- "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
- "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8",
- "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2",
- "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
- "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
- "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-h-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
- "pkg": {
- "0": {
- "transformers": "CLIPModel"
- }
- },
- "file_256": [
- "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
- "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5",
- "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"
- ],
- "layer_256": [
- "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
- "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061",
- "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c"
- ],
- "layer_b3": [
- "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
- "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
- "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.aet.chatglm3": {
- "*": {
- "repo": "zai-org/chatglm3-6b",
- "pkg": {
- "0": {
- "transformers": "AutoModel"
- }
- },
- "file_256": [
- "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654",
- "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d"
- ],
- "layer_256": [
- "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44"
- ],
- "layer_b3": [
- "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481"
- ]
- }
- },
- "info.art.qwen2": {
- "bagel-mot": {
- "repo": "ByteDance-Seed/BAGEL-7B-MoT",
- "pkg": {
- "0": {
- "Bagel": "app"
- }
- }
- }
- },
- "info.vae.tae": {
- "stable-diffusion-3": {
- "repo": "madebyollin/taesd3",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"
- ]
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/taesdxl",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "madebyollin/taesd",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"
- ]
- },
- "flux1-dev": {
- "repo": "madebyollin/taef1",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"
- ]
- }
- },
- "info.vae.kl": {
- "qwen-image": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLQwenImage"
- }
- },
- "file_256": [
- "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344"
- ],
- "layer_256": [
- "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc"
- ],
- "layer_b3": [
- "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b"
- ]
- },
- "ltx-video": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLLTXVideo"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "allegro": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLAllegro"
- }
- },
- "file_256": [
- "47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"
- ],
- "layer_256": [
- "bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"
- ],
- "layer_b3": [
- "93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"
- ]
- },
- "cosmos-1-diffusion-video2world": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCosmos"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "easyanimatev5-zh": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMagvit"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "hunyuanvideo-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLHunyuanVideo"
- }
- },
- "file_256": [
- "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818",
- "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
- "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
- "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0"
- ]
- },
- "mochi-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMochi"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "audioldm-s-v2": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"
- ],
- "layer_256": [
- "54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"
- ],
- "layer_b3": [
- "00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"
- ]
- },
- "stable-video-diffusion-img2vid-xt": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLTemporalDecoder"
- }
- }
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/sdxl-vae-fp16-fix",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c",
- "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646",
- "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48",
- "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68",
- "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341",
- "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4",
- "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74",
- "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0"
- ]
- },
- "stable-diffusion-xl-1*": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a",
- "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1",
- "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74"
- ]
- },
- "shuttle-jaguar": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904"
- ],
- "layer_256": [
- "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b"
- ],
- "layer_b3": [
- "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
- "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0"
- ]
- },
- "flux1-dev": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38",
- "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3",
- "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94",
- "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc"
- ],
- "layer_256": [
- "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
- "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216",
- "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004",
- "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80"
- ],
- "layer_b3": [
- "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17",
- "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903"
- ]
- },
- "musicldm": {
- "file_256": [
- "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4"
- ],
- "layer_256": [
- "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f"
- ],
- "layer_b3": [
- "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0"
- ]
- },
- "stable-diffusion-v1-5": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520",
- "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a",
- "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46",
- "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab",
- "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75",
- "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815",
- "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c",
- "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c"
- ],
- "layer_256": [
- "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd",
- "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3"
- ],
- "layer_b3": [
- "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e",
- "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833"
- ]
- }
- },
- "info.vae.wan": {
- "wan2-i2v-480p": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLWan",
- "precision": "ops.precision.float.F32"
- }
- },
- "file_256": [
- "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793",
- "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"
- ],
- "layer_256": [
- "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
- "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e"
- ],
- "layer_b3": [
- "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010",
- "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433"
- ]
- },
- "skyreels-v2-t2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "skyreels-v2-i2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.cogvideox": {
- "cogvideox-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCogVideoX"
- }
- },
- "file_256": [
- "a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"
- ],
- "layer_256": [
- "43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"
- ],
- "layer_b3": [
- "246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"
- ]
- },
- "cogvideox-fun-v-pose": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "consisid": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.dc": {
- "sana-1024px-bf16": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderDC"
- }
- },
- "file_256": [
- "15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"
- ],
- "layer_256": [
- "abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"
- ],
- "layer_b3": [
- "cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"
- ]
- }
- },
- "info.vae.oobleck": {
- "stable-audio-open-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderOobleck"
- }
- }
- }
- },
- "info.vae.eq": {
- "stable-diffusion-xl-1": {
- "repo": "KBlueLeaf/EQ-SDXL-VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- },
- "info.vae.ms-lc-eq": {
- "stable-diffusion-xl-1": {
- "repo": "Anzhc/MS-LC-EQ-D-VR_VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- }
-}
\ No newline at end of file
diff --git a/mir/spec/__init__.py b/mir/spec/__init__.py
index 618a5cc..4e29e96 100644
--- a/mir/spec/__init__.py
+++ b/mir/spec/__init__.py
@@ -1,18 +1,14 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-
import urllib.parse
from collections import defaultdict
from dataclasses import dataclass
-from logging import INFO, Logger
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, TypeVar, Union
from pydantic import BaseModel, create_model
-nfo_obj = Logger(INFO)
-nfo = nfo_obj.info
T = TypeVar("T")
@@ -188,7 +184,7 @@ def __init__(self, series: str) -> None:
self.compatibility = defaultdict(dict)
self.flat_dict = defaultdict(dict)
- def add_compat(self, compat_label: str, compat_obj: Dict[str, int | float | list | str]) -> None:
+ def add_compat(self, compat_label: str, compat_obj: Dict[str, Any]) -> None:
"""Add compatibility: Attribute an object to a sub-class of the Series"""
self.compatibility[compat_label] = compat_obj
@@ -278,7 +274,7 @@ def to_dict(self) -> Dict[str, Any]:
return self.flat_dict
-def mir_entry(domain: str, arch: str, series: str, comp: str, **kwargs) -> None:
+def mir_entry(domain: str, arch: str, series: str, comp: str, **kwargs) -> dict[str, Any]:
"""Define a new Machine Intelligence Resource\n
:param domain: Broad name of the type of data (model/ops/info/dev)
:param arch: Common name of the neural network structure being referenced
@@ -297,18 +293,10 @@ def mir_entry(domain: str, arch: str, series: str, comp: str, **kwargs) -> None:
return domain_inst.to_dict()
-# def create_model_tag(model_header,metadata_dict):
-# parse_file = parse_model_header(model_header)
-# reconstructed_file_path = os.path.join(disk_path,each_file)
-# attribute_dict = metadata_dict | {"disk_path": reconstructed_file_path}
-# file_metadata = parse_file | attribute_dict
-# index_tag = create_model_tag(file_metadata)
-#
-
-
def main():
"""Add a single entry to MIR database\n"""
import argparse
+
from mir.maid import MIRDatabase
parser = argparse.ArgumentParser(
@@ -337,15 +325,19 @@ def main():
parser.add_argument("-a", "--arch", type=str, help=" Common name of the neural network structure being referenced")
parser.add_argument("-s", "--series", type=str, help="Specific release title or technique")
parser.add_argument("-c", "--comp", "--compatibility", type=str, help="Details about purpose, tasks")
- parser.add_argument(
- "-k", "--kwargs", "--keyword-arguments", type=dict[str | int, str | int | dict | list], help="Keyword arguments to pass to function constructors (default: NOne)"
- )
+ parser.add_argument("-k", "--kwargs", "--keyword-arguments", help="Keyword arguments to pass to function constructors (default: None)")
args = parser.parse_args()
mir_db = MIRDatabase()
mir_db.add(
- mir_entry(domain=args.domain, arch=args.arch, series=args.series, comp=args.compatibility, **args.kwargs),
+ mir_entry(
+ domain=args.domain,
+ arch=args.arch,
+ series=args.series,
+ comp=args.compatibility,
+ **args.kwargs,
+ ),
)
mir_db.write_to_disk()
diff --git a/mir/spec/versions.json b/mir/spec/regex.json
similarity index 68%
rename from mir/spec/versions.json
rename to mir/spec/regex.json
index 0fe7908..4430d31 100644
--- a/mir/spec/versions.json
+++ b/mir/spec/regex.json
@@ -1,4 +1,7 @@
{
+ "breaking": ".*(?:-)(prior)$|.*(?:-)(diffusers)$|.*[_-](\\d{3,4}px|-T2V$|-I2V$)",
+ "search": "\\d+[._-]?\\d+[BbMmKk](it)?|[._-]\\d+[BbMmKk](it)?",
+ "parameters": "(\\d{1,4}[KkMmBb]|[._-]\\d+[\\._-]\\d+[Bb][._-]).*?$",
"semantic": [
"-?\\d+[bBmMkK]",
"-?v\\d+",
@@ -8,7 +11,7 @@
"-large$",
"-medium$"
],
- "suffixes": [
+ "suffix": [
"-\\d{1,2}[bBmMkK]",
"-\\d[1-9][bBmMkK]",
"-v\\d{1,2}",
diff --git a/mir/tag.py b/mir/tag.py
index 3c1fec4..6cb4d16 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -2,7 +2,7 @@
#
from typing import Any
-from mir.config.constants import PARAMETERS_SUFFIX, BREAKING_SUFFIX, ClassMapEntry
+from mir import PARAMETERS, BREAKING, SEARCH
def tag_model_from_repo(repo_title: str, decoder=False, data: dict | None = None) -> tuple[str, Any]:
@@ -28,14 +28,14 @@ def tag_model_from_repo(repo_title: str, decoder=False, data: dict | None = None
parts = repo_title.split("_")
subtraction_prefixes = r"\d.b-|\-rl|tiny|large|mlx|onnx|gguf|medium|base|multimodal|mini|instruct|full|:latest|preview|small|pro|beta|hybrid|plus|dpo|community"
- pattern_2 = re.compile(PARAMETERS_SUFFIX)
+ pattern_2 = re.compile(PARAMETERS)
clean_parts = [re.sub(pattern_2, "", segment.lower()) for segment in parts]
cleaned_string = "-".join([x for x in clean_parts if x])
cleaned_string = re.sub(subtraction_prefixes, "", cleaned_string)
cleaned_string = re.sub("-it", "", cleaned_string.replace("-bit", "")).replace("--", "-")
cleaned_string = cleaned_string.replace("-b-", "")
# print(cleaned_string)
- suffix_match = re.findall(BREAKING_SUFFIX, cleaned_string) # Check for breaking suffixes first
+ suffix_match = re.findall(BREAKING, cleaned_string) # Check for breaking suffixes first
if suffix_match:
suffix = next(iter(suffix for suffix in suffix_match[0] if suffix))
cleaned_string = re.sub(suffix.lower(), "-", cleaned_string).rstrip("-,")
@@ -43,116 +43,3 @@ def tag_model_from_repo(repo_title: str, decoder=False, data: dict | None = None
suffix = root
cleaned_string = re.sub(r"[._]+", "-", cleaned_string.lower()).strip("-_")
return (cleaned_string, suffix)
-
-
-def tag_scheduler(series_name: str) -> tuple[str, str]:
- """Create a mir label from a scheduler operation\n
- :param class_name: Known period-separated prefix and model type
- :return: The assembled mir tag with compatibility pre-separated"""
-
- import re
-
- comp_name = None
- patterns = [r"Schedulers", r"Multistep", r"Solver", r"Discrete", r"Scheduler"]
- for scheduler in patterns:
- compiled = re.compile(scheduler)
- match = re.search(compiled, series_name)
- if match:
- comp_name = match.group()
- comp_name = comp_name.lower()
- break
- for pattern in patterns:
- series_name = re.sub(pattern, "", series_name)
- series_name.lower()
- assert series_name is not None
- assert comp_name is not None
- return series_name, comp_name
-
-
-def mir_prefix_from_forward_pass(transformers: bool = False, **kwargs):
- """Set type of MIR prefix depending on model type\n
- :param transformers: Use transformers data instead of diffusers data, defaults to False
- :raises ValueError: Model type not detected
- :return: MIR prefix based on model configuration"""
- from mir.config.json_io import read_json_file
-
- data = read_json_file("mir/spec/template.json")
-
- if transformers:
- flags = data["arch"]["transformer"] # pylint:disable=unsubscriptable-object
- else:
- flags = data["arch"]["diffuser"] # pylint:disable=unsubscriptable-object
- for mir_prefix, key_match in flags.items():
- if any(kwargs.get(param, None) for param in key_match):
- return mir_prefix
- return None
-
-
-def tag_base_model(repo_path: str, class_name: str, addendum: dict | None = None) -> tuple[str, str, str | dict[str, dict]]:
- """Convert model repo paths to MIR tags, classifying by feature\n
- :param name: Repo path
- :param class_name: The HF transformers class for the model
- :return: A segmented MIR tag useful for appending index entries"""
-
- from mir.config.constants import extract_init_parameters
-
- annotations = extract_init_parameters(class_name.replace("Model", "Config"), "transformers")
- if not annotations:
- class_name = class_name.replace("Config", "Model")
- annotations = extract_init_parameters(class_name, "transformers")
- if not annotations:
- raise TypeError("No mode type returned")
- mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
- base_series, base_comp = tag_model_from_repo(repo_path)
- if not addendum:
- return mir_prefix, base_series, base_comp
- else:
- mir_prefix = f"info.{mir_prefix}"
- return mir_prefix, base_series, {base_comp: addendum}
-
-
-def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
- """Convert model repo pipes to MIR tags, classifying by feature\n
- :param name: Repo path
- :param class_name: The HF Diffusers class for the model
- :return: A segmented MIR tag useful for appending index entries"""
-
- from mir.indexers import create_pipe_entry
-
- mir_series, mir_data = create_pipe_entry(repo_path=repo_path, class_name=class_name)
- mir_prefix, mir_series = mir_series.rsplit(".", 1)
- mir_comp = list(mir_data)[0]
- return mir_prefix, mir_series, {mir_comp: addendum}
-
-
-def mir_tag_from_config(class_map: ClassMapEntry, repo_path: str) -> tuple[str, str, str]:
- """Change a transformers config class into a MIR series and comp
- :param class_map: Transformers class information extracted from dependency"""
-
- mir_prefix = mir_prefix_from_forward_pass(transformers=True, **class_map.config_params)
- if not mir_prefix:
- if class_map.model_params:
- if mir_prefix := mir_prefix_from_forward_pass(transformers=True, **class_map.model_params):
- pass
- else:
- raise ValueError(f"Unable to determine MIR prefix from {class_map, repo_path}")
- else:
- raise ValueError(f"Unrecognized model type, no tag matched {class_map.name} with {class_map.config_params} or {class_map.model_params}")
- mir_prefix = "info." + mir_prefix
- if class_map.name != "funnel":
- mir_suffix, mir_comp = tag_model_from_repo(repo_path)
- else:
- mir_suffix, mir_comp = ["funnel", "*"]
- mir_series = mir_prefix + "." + mir_suffix
- return mir_series, mir_comp, mir_suffix
-
-
-# def tag_mlx_model(repo_path: str, class_name: str, addendum: dict) -> tuple[str]:
-# dev_series, dev_comp = make_mir_tag("black-forest-labs/FLUX.1-dev")
-# schnell_series, schnell_comp = make_mir_tag("black-forest-labs/FLUX.1-schnell")
-# series, comp = make_mir_tag(repo_path)
-# if class_name == "Flux1":
-# mir_prefix = "info.dit"
-# base_series = dev_series
-# mir_comp = series
-# return mir_prefix, base_series, {base_comp: addendum}
diff --git a/pyproject.toml b/pyproject.toml
index 580736e..4c33193 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,7 +39,7 @@ Homepage = "https://github.com/darkshapes/MIR"
Documentation = "https://github.com/darkshapes/sdbx/wiki"
[project.scripts]
-mir = "mir.__init__:main"
+mir = "mir.generate.__main__:main"
[tool.setuptools_scm]
version_scheme = "guess-next-dev"
diff --git a/tests/test_deconstructors_root.py b/tests/test_deconstructors_root.py
index c8e01ad..67a0bed 100644
--- a/tests/test_deconstructors_root.py
+++ b/tests/test_deconstructors_root.py
@@ -2,17 +2,17 @@
# # #
import pytest
-from mir.inspect.classes import extract_init_params
+from mir.config.constants import extract_init_parameters
def test_root_class_with_builtin_types():
class DummyInitModule:
- def __init__(self, flag: bool, count: int):
+ def __init__(self):
pass
expected_output = {}
- result = extract_init_params(DummyInitModule)
+ result = extract_init_parameters(DummyInitModule)
assert result == expected_output
diff --git a/tests/test_seek_class.py b/tests/test_seek_class.py
index 28f847c..4d3a1de 100644
--- a/tests/test_seek_class.py
+++ b/tests/test_seek_class.py
@@ -10,7 +10,8 @@ def test_seek_diffusers_path():
def test_seek_transformers_path():
- assert get_class_parent_folder(import_submodules("AlbertModel", "transformers"), "transformers") == ["transformers", "models", "albert"]
+ module = import_submodules("AlbertModel", "transformers")
+ assert get_class_parent_folder(module, "transformers") == ["transformers", "models", "albert"]
def test_seek_class_attention():
diff --git a/tests/test_taskanalyzer.py b/tests/test_taskanalyzer.py
index 4161da7..77adb96 100644
--- a/tests/test_taskanalyzer.py
+++ b/tests/test_taskanalyzer.py
@@ -10,17 +10,16 @@
from mir.inspect.tasks import TaskAnalyzer
-
def test_show_transformers_tasks_by_code_name():
"""Test that show_transformers_tasks returns a list of class names when code_name is provided."""
tasks = TaskAnalyzer.show_transformers_tasks(code_name="bert")
-
+
# Should return a list (not a type object)
- assert isinstance(tasks, list), f"Expected list, got {type(tasks)}"
-
+ assert isinstance(tasks, list), f"Expected list, got {tasks} type {type(tasks)}"
+
# Should contain string class names
if tasks:
- assert all(isinstance(task, str) for task in tasks), f"Expected list of strings, got {tasks}"
+ assert all(isinstance(task, str) for task in tasks), f"Expected list of strings, got {tasks} type {type(tasks)}"
print(f"show_transformers_tasks('bert') returned: {tasks}")
@@ -178,6 +177,7 @@ def test_show_diffusers_tasks():
# assert tasks == ["DummyClass"]
+
@pytest.mark.asyncio
async def test_trace_finds_map_with_code_name():
ap = TaskAnalyzer()
From 5143276a2c59f80a7122c9fc55a4ab0b4aaf2e13 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Tue, 13 Jan 2026 22:03:55 -0500
Subject: [PATCH 06/16] -rewrite
---
MIR.egg-info/PKG-INFO | 7 +-
MIR.egg-info/SOURCES.txt | 53 +-
MIR.egg-info/entry_points.txt | 2 +-
MIR.egg-info/requires.txt | 1 +
mir.json | 14941 ----------------
mir/__init__.py | 1 +
{data => mir/data}/__init__.py | 2 +-
{data => mir/data}/diffusers_adds.json | 2 +-
{data => mir/data}/exclusions.json | 0
{data => mir/data}/migrations.json | 34 +-
{data => mir/data}/nn_filter.json | 0
{data => mir/data}/parameters.json | 10 +-
.../data/pipe_markers.json | 0
{data => mir/data}/tag_scrape.json | 0
{data => mir/data}/transformers_adds.json | 0
mir/generate/.notes.txt | 66 -
mir/generate/__main__.py | 118 +
mir/generate/diffusers/doc_parse.py | 33 +-
mir/generate/from_module.py | 41 +-
mir/generate/indexers.py | 29 +-
mir/generate/transformers/__init__.py | 25 +-
mir/generate/transformers/index.py | 324 +-
mir/generate/transformers/raw_data.py | 66 +
mir/generate/transformers/tokenizers.py | 24 +
mir/generate/write_to_mir.py | 31 -
mir/maid.py | 153 +-
{data => mir}/mir.json | 0
mir/spec/docstring_patterns.json | 41 -
mir/spec/missing_params.json | 73 -
mir/spec/repo_migrations.json | 29 -
mir/tag.py | 6 +-
pyproject.toml | 1 +
uv.lock | 25 +
33 files changed, 504 insertions(+), 15634 deletions(-)
delete mode 100644 mir.json
rename {data => mir/data}/__init__.py (90%)
rename {data => mir/data}/diffusers_adds.json (99%)
rename {data => mir/data}/exclusions.json (100%)
rename {data => mir/data}/migrations.json (68%)
rename {data => mir/data}/nn_filter.json (100%)
rename {data => mir/data}/parameters.json (74%)
rename data/prefixes.json => mir/data/pipe_markers.json (100%)
rename {data => mir/data}/tag_scrape.json (100%)
rename {data => mir/data}/transformers_adds.json (100%)
delete mode 100644 mir/generate/.notes.txt
create mode 100644 mir/generate/transformers/raw_data.py
create mode 100644 mir/generate/transformers/tokenizers.py
delete mode 100644 mir/generate/write_to_mir.py
rename {data => mir}/mir.json (100%)
delete mode 100644 mir/spec/docstring_patterns.json
delete mode 100644 mir/spec/missing_params.json
delete mode 100644 mir/spec/repo_migrations.json
diff --git a/MIR.egg-info/PKG-INFO b/MIR.egg-info/PKG-INFO
index d98b3d3..ba67201 100644
--- a/MIR.egg-info/PKG-INFO
+++ b/MIR.egg-info/PKG-INFO
@@ -33,6 +33,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
Requires-Python: >=3.11
Description-Content-Type: text/markdown
License-File: LICENSE
+Requires-Dist: chanfig>=0.0.114
Requires-Dist: diffusers>=0.35.2
Requires-Dist: ftfy>=6.3.1
Requires-Dist: huggingface-hub[hf-xet]>=1.1.7
@@ -139,14 +140,14 @@ Meant to be created by standards community, derived from code and file analysis
| ART
| Autoregressive Transformer |
| BRNN
| Bi-directional Recurrent Neural Network |
| CNN
| Convolutional Neural Network |
-| CONTROLNET
| Controlnet |
+| CONTROLNET
| ControlNet |
| DETR
| Detection Transformer |
| GAN
| Generative Adversarial Model |
-| GRU
| Gated recurrent unit |
+| GRU
| Gated Recurrent Unit |
| LORA
| Low-Rank Adaptation |
| LSTM
| Long Short-Term Memory |
| MOE
| Mixture of Experts |
-| RBM
| Restricted Boltzmann machine |
+| RBM
| Restricted Boltzmann Machine |
| RCNN
| Region-based Convolutional Neural Network |
| RESNET
| Residual Network |
| RNN
| Recurrent Neural Network |
diff --git a/MIR.egg-info/SOURCES.txt b/MIR.egg-info/SOURCES.txt
index dea9843..e7d1cc2 100644
--- a/MIR.egg-info/SOURCES.txt
+++ b/MIR.egg-info/SOURCES.txt
@@ -13,29 +13,42 @@ MIR.egg-info/entry_points.txt
MIR.egg-info/requires.txt
MIR.egg-info/top_level.txt
mir/__init__.py
-mir/__main__.py
-mir/automata.py
-mir/doc_parser.py
-mir/indexers.py
+mir/json_io.py
mir/maid.py
mir/mir.json
mir/tag.py
-mir/config/__init__.py
-mir/config/console.py
-mir/config/constants.py
-mir/config/conversion.py
-mir/config/json_io.py
-mir/inspect/__init__.py
-mir/inspect/classes.py
-mir/inspect/metadata.py
-mir/inspect/parenting.py
-mir/inspect/pipes.py
-mir/inspect/tasks.py
-mir/spec/docstring_patterns.json
-mir/spec/mir.py
-mir/spec/modes.json
-mir/spec/template.json
-mir/spec/versions.json
+mir/data/__init__.py
+mir/data/diffusers_adds.json
+mir/data/exclusions.json
+mir/data/migrations.json
+mir/data/nn_filter.json
+mir/data/parameters.json
+mir/data/pipe_markers.json
+mir/data/tag_scrape.json
+mir/data/transformers_adds.json
+mir/generate/__init__.py
+mir/generate/__main__.py
+mir/generate/_extras.py
+mir/generate/automata.py
+mir/generate/from_module.py
+mir/generate/indexers.py
+mir/generate/tasks.py
+mir/generate/diffusers/__init__.py
+mir/generate/diffusers/attention.py
+mir/generate/diffusers/doc_parse.py
+mir/generate/diffusers/guiders.py
+mir/generate/diffusers/index.py
+mir/generate/diffusers/schedulers.py
+mir/generate/mlx/__init__.py
+mir/generate/mlx/index.py
+mir/generate/torch/__init__.py
+mir/generate/torch/dtypes.py
+mir/generate/transformers/__init__.py
+mir/generate/transformers/index.py
+mir/generate/transformers/raw_data.py
+mir/generate/transformers/tokenizers.py
+mir/spec/__init__.py
+mir/spec/regex.json
tests/test_class_parent.py
tests/test_deconstructors_root.py
tests/test_doc_parser.py
diff --git a/MIR.egg-info/entry_points.txt b/MIR.egg-info/entry_points.txt
index e057fe6..cf321fe 100644
--- a/MIR.egg-info/entry_points.txt
+++ b/MIR.egg-info/entry_points.txt
@@ -1,2 +1,2 @@
[console_scripts]
-mir = mir.__init__:main
+mir = mir.generate.__main__:main
diff --git a/MIR.egg-info/requires.txt b/MIR.egg-info/requires.txt
index 089ac9c..5f1d4be 100644
--- a/MIR.egg-info/requires.txt
+++ b/MIR.egg-info/requires.txt
@@ -1,3 +1,4 @@
+chanfig>=0.0.114
diffusers>=0.35.2
ftfy>=6.3.1
huggingface-hub[hf-xet]>=1.1.7
diff --git a/mir.json b/mir.json
deleted file mode 100644
index c897555..0000000
--- a/mir.json
+++ /dev/null
@@ -1,14941 +0,0 @@
-{
- "info.controlnet.sd-controlnet-canny": {
- "*": {
- "repo": "lllyasviel/sd-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.blipdiffusion-controlnet": {
- "*": {
- "repo": "Salesforce/blipdiffusion-controlnet",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionControlNetPipeline"
- }
- }
- }
- },
- "info.controlnet.control-v11p-sd15-inpaint": {
- "*": {
- "repo": "lllyasviel/control_v11p_sd15_inpaint",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-canny-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-canny-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.controlnet-depth-sdxl-1": {
- "*": {
- "repo": "diffusers/controlnet-depth-sdxl-1.0-small",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.stable-diffusion-xl-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLControlNetUnionInpaintPipeline"
- }
- }
- }
- },
- "info.controlnet.controlnet-union-sdxl-1": {
- "*": {
- "repo": "xinsir/controlnet-union-sdxl-1.0",
- "pkg": {
- "0": {
- "diffusers": "ControlNetUnionModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-canny": {
- "*": {
- "repo": "InstantX/SD3-Controlnet-Canny",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.sd3-controlnet-inpainting": {
- "*": {
- "repo": "alimama-creative/SD3-Controlnet-Inpainting",
- "pkg": {
- "0": {
- "diffusers": "SD3ControlNetModel"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sd2-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SD2.1-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.controlnet.testing-conrolnetxs-sdxl-canny": {
- "*": {
- "repo": "UmerHA/Testing-ConrolNetXS-SDXL-canny",
- "pkg": {
- "0": {
- "diffusers": "ControlNetXSAdapter"
- }
- }
- }
- },
- "info.unet.marigold-depth-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-depth-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldDepthPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-depth-v1-1"
- ]
- }
- }
- },
- "info.unet.marigold-iid-appearance-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-iid-appearance-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldIntrinsicsPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-iid-appearance-v1-1"
- ]
- }
- }
- },
- "info.unet.marigold-normals-v1-1": {
- "*": {
- "repo": "prs-eth/marigold-normals-v1-1",
- "pkg": {
- "0": {
- "diffusers": "MarigoldNormalsPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- [
- "ops.scheduler.ddim",
- "scheduler"
- ],
- [
- "ops.scheduler.lcm",
- "scheduler"
- ]
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "marigold-normals-v1-1"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-v1-5": {
- "*": {
- "repo": "stable-diffusion-v1-5/stable-diffusion-v1-5",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionPipeline"
- }
- },
- "identifiers": [
- "up_blocks.3.attentions.0.transformer_blocks.0.norm3.weight"
- ],
- "file_256": [
- "6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa",
- "1a189f0be69d6106a48548e7626207dddd7042a418dbf372cefd05e0cdba61b6",
- "e1441589a6f3c5a53f5f54d0975a18a7feb7cdf0b0dee276dfc3331ae376a053",
- "cc6cb27103417325ff94f52b7a5d2dde45a7515b25c255d8e396c90014281516",
- "19da7aaa4b880e59d56843f1fcb4dd9b599c28a1d9d9af7c1143057c8ffae9f1",
- "cd1b6db09a81cb1d39fbd245a89c1e3db9da9fe8eba5e8f9098ea6c4994221d3",
- "c83908253f9a64d08c25fc90874c9c8aef9a329ce1ca5fb909d73b0c83d1ea21"
- ],
- "layer_b3": [
- "909c6ff3192ab2767e789a6125865bc23163db467ab78b1c633bad46a4293fad",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "d31382d71a1044b636d80d861a2b4dbca51826bed34d34b5c14608b7679ccefd",
- "5fd8b28013b7e5a64c7c235f0a93d93e48bc19a0e5dde7b646a87b429219643a",
- "731f552f29edcb4f86112cc94d296377f3533a9633ccf83e202d9e1785d94a00",
- "2d2f97574a161cf01a6f6d476b141c7be06f940d94b695ffc12c4e74eca2de1c"
- ],
- "layer_256": [
- "ece771354ad470a82d56eda413ae3dd6c00d2de28ab3c56a88201d08d4424b4b",
- "65b084dada803461ab9ca9be9b892d211870a121dd6c555a111eea470b951c54",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91",
- "92565dec90f7c8412dc872e820f66cd0c56263bbbc392439645b6fee270f41bb"
- ],
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-v1-5"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ]
- }
- }
- },
- "info.unet.stable-unclip-2-1-l": {
- "*": {
- "repo": "fusing/stable-unclip-2-1-l",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "prior_tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "prior_text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "prior": [
- "PriorTransformer"
- ],
- "prior_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_normalizer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-unclip-2-1-l"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-2-1-unclip": {
- "*": {
- "repo": "stabilityai/stable-diffusion-2-1-unclip-small",
- "pkg": {
- "0": {
- "diffusers": "StableUnCLIPImg2ImgPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_normalizer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-2-1-unclip"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vae": [
- "AutoencoderKL"
- ]
- }
- }
- },
- "info.unet.stable-diffusion-xl-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "denoising_end": 0.8,
- "num_inference_steps": 40,
- "output_type": "latent",
- "safety_checker": false,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "357650fbfb3c7b4d94c1f5fd7664da819ad1ff5a839430484b4ec422d03f710a",
- "83e012a805b84c7ca28e5646747c90a243c65c8ba4f070e2d7ddc9d74661e139",
- "31e35c80fc4829d14f90153f4c74cd59c90b779f6afe05a74cd6120b893f7e5b",
- "6f001c090fb13c0d0f8b0a5916da814712a94400b99471fabe77c1c4a51ecaaf"
- ],
- "layer_256": [
- "62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef",
- "34dff8d98898baa0f10e71943e56b588cc114253b0d2f1051f3ce7a8a45fee0b",
- "56b1ccd89b0d6ab658048aa34d659788b6ed663f13ef566f4b11bccef590b9da"
- ],
- "layer_b3": [
- "8be44fa13c1efa60f8bcadaa57f1d718473f9660f03c4f0e65dc037960d8cba1",
- "c9ab95ed1851418b65ef99651c1eb6bbdd2e3b0715e0e435d6d1e56ce310fac3",
- "adfa260098d87616d748e3cf9c10bb2c90ff8890a84abbb2853d4aa69664070b"
- ],
- "identifiers": [
- "logit_scale",
- "conditioner.embedders.0.transformer.text_model.encoder.layers.0.self_attn.k_proj.weight",
- "add_embedding.linear_2.bias"
- ],
- "pipe_names": {}
- },
- "pony-diffusion": {
- "file_256": [
- "67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"
- ],
- "layer_256": [
- "465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"
- ],
- "layer_b3": [
- "bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"
- ]
- },
- "pony-diffusion-turbo": {
- "file_256": [
- "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587",
- "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2"
- ],
- "layer_256": [
- "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
- "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52"
- ],
- "layer_b3": [
- "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
- "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc"
- ]
- },
- "animagine-xl-4": {
- "repo": "cagliostrolab/animagine-xl-4.0",
- "file_256": [
- "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f",
- "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac",
- "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766",
- "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0"
- ],
- "layer_256": [
- "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
- "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
- "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
- "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65"
- ],
- "layer_b3": [
- "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
- "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
- "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
- "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447"
- ]
- },
- "illustrious-xl-v2": {
- "repo": "OnomaAIResearch/Illustrious-XL-v2.0",
- "file_256": [
- "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331",
- "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c",
- "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9",
- "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce",
- "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd"
- ],
- "layer_256": [
- "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
- "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
- "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
- "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
- "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10"
- ],
- "layer_b3": [
- "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
- "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
- "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
- "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
- "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c"
- ]
- },
- "playground-v2---aesthetic": {
- "repo": "playgroundai/playground-v2.5-1024px-aesthetic",
- "pkg": {
- "0": {
- "diffusers": "DiffusionPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 50,
- "guidance_scale": 3
- }
- }
- },
- "file_256": [
- "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8",
- "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992",
- "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a",
- "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864",
- "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4",
- "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a"
- ],
- "layer_256": [
- "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
- "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843",
- "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
- "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e",
- "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c"
- ],
- "layer_b3": [
- "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b",
- "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289",
- "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc",
- "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471",
- "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8",
- "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd"
- ],
- "pipe_names": {}
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-Vega",
- "file_256": [
- "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce",
- "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1",
- "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2"
- ],
- "layer_256": [
- "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
- "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
- "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741"
- ],
- "layer_b3": [
- "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
- "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
- "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1"
- ]
- },
- "ssd": {
- "repo": "segmind/SSD-1B",
- "file_256": [
- "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd",
- "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d",
- "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
- "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca",
- "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a"
- ],
- "layer_256": [
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ],
- "layer_b3": [
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4",
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6"
- ]
- }
- },
- "info.unet.stable-diffusion-xl-refiner-1": {
- "*": {
- "repo": "stabilityai/stable-diffusion-xl-refiner-1.0",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLImg2ImgPipeline"
- },
- "1": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "num_inference_steps": 40,
- "denoising_end": 0.8
- }
- }
- },
- "identifiers": [
- "conditioner.embedders.0.model.transformer.resblocks.9.mlp.c_proj.bias"
- ],
- "file_256": [
- "54f9cd2f2daf3aeec0b2708fa3dbc0e84e4f8ddd1ddead42e5bc60c6572c989f",
- "7440042bbdc8a24813002c09b6b69b64dc90fded4472613437b7f55f9b7d9c5f",
- "3ea0376dcf065eaefd27806394a90e310001b1a71d4f1cf1f655e86c0e566ffe"
- ],
- "layer_b3": [
- "6281355dbb37e5769c9460ae0ac75506d89932e2f97b09d9ade32ecf191e75ba",
- "afb0639aae2eb65577c12d4a30cf7c9b3620ae63ba64a8fa632b58608c8a7a2e",
- "669046014b69d98ab0f6fbb59547644436e0275f8b638f467ce2a873c3313683"
- ],
- "layer_256": [
- "bb9eadbfabb52c0d8645783525a3fa70b59e9d7d09d5290d742a303262e793a2",
- "c5adb56fe51343af2c3d493eb9f41515c204bd91eb9f40b983d45f70a1fa3b6d",
- "1f838e39ed6e916258aee6990b72c09b34aa8eb3b5342234a497b8852b3df1c6"
- ],
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "stable-diffusion-xl-refiner-1"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.sdxl-pix2pix-768": {
- "*": {
- "repo": "diffusers/sdxl-instructpix2pix-768",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionXLInstructPix2PixPipeline"
- }
- },
- "tasks": [
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "sdxl-pix2pix-768"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.allegro": {
- "*": {
- "repo": "rhymes-ai/Allegro",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "guidance_scale": 7.5,
- "max_sequence_length": 512,
- "num_inference_steps": 100
- }
- }
- },
- "file_256": [
- "6927dcc812841c1da549bf11c97ddf30532aee0e708a6642fa64cf8e0dfcdef7"
- ],
- "layer_b3": [
- "8b20714a6af89ea4bf4ada1f805c5b9d529ef136c229e9b75392242d62d80c3e"
- ],
- "layer_256": [
- "9e44e6c919dc71c24a193641e6265cd9983a2a773b9bbaf527c10ac4837b29fd"
- ]
- }
- },
- "info.dit.amused-512": {
- "*": {
- "repo": "amused/amused-512",
- "pkg": {
- "0": {
- "diffusers": "AmusedInpaintPipeline"
- }
- },
- "pipe_names": {
- "vqvae": [
- "VQModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "amused-512"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "UVit2DModel"
- ],
- "scheduler": [
- "ops.scheduler.amused",
- "scheduler"
- ]
- }
- }
- },
- "info.lora.animatediff-motion-adapter-v1-5-2": {
- "*": {
- "repo": "guoyww/animatediff-motion-adapter-v1-5-2",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffVideoToVideoPipeline"
- }
- }
- }
- },
- "info.lora.animatelcm": {
- "*": {
- "repo": "wangfuyun/AnimateLCM",
- "pkg": {
- "0": {
- "diffusers": "MotionAdapter"
- }
- }
- }
- },
- "info.lora.animatediff-motion-adapter-sdxl": {
- "*": {
- "repo": "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta",
- "pkg": {
- "0": {
- "diffusers": "AnimateDiffSDXLPipeline"
- }
- }
- }
- },
- "info.controlnet.animatediff-sparsectrl-scribble": {
- "*": {
- "repo": "guoyww/animatediff-sparsectrl-scribble",
- "pkg": {
- "0": {
- "diffusers": "SparseControlNetModel"
- }
- }
- }
- },
- "info.controlnet.animatelcm": {
- "*": {
- "repo": "wangfuyun/AnimateLCM",
- "pkg": {
- "0": {
- "diffusers": "ControlNetModel"
- }
- }
- }
- },
- "info.dit.bria-3": {
- "*": {
- "repo": "briaai/BRIA-3.2",
- "pkg": {
- "0": {
- "diffusers": "BriaPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "BriaTransformer2DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.euler",
- "discrete"
- ],
- [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "bria-3"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.flux2-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.2-dev",
- "pkg": {
- "0": {
- "diffusers": "Flux2Pipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.mistral-3-2503",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux2-dev"
- ],
- "transformer": [
- "Flux2Transformer2DModel"
- ]
- }
- }
- },
- "info.dit.flux1-schnell": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-schnell",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 0.0,
- "num_inference_steps": 4,
- "max_sequence_length": 256
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "num_inference_steps": 4
- }
- }
- },
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "file_256": [
- "9403429e0052277ac2a87ad800adece5481eecefd9ed334e1f348723621d2a0a",
- "9b633dbe87316385c5b1c262bd4b5a01e3d955170661d63dcec8a01e89c0d820"
- ],
- "layer_b3": [
- "c65ba812ce3ce056eb1585673f62fb896afe6ec049faaf00a97bc35c9a398c44",
- "03049273329fc7db2da10de6d3eb27cb03f190e379c0556cc97b3f0f29001d0c",
- "483c4be8ef031c56bc8450d1a3cfbe54445ed317bcd801be5abe89f1d3c48790"
- ],
- "layer_256": [
- "79c07e339865fe9e22c80f723d728c778130acd07a330339c68218b92bb7b3b8",
- "ef5c9cd1ebe6e3be5e8b1347eca0a6f0b138986c71220a7f1c2c14f29d01beed",
- "27bc71eca2d2ff7459165acc12010230911db7709a4f6a5c255befedfa6b1649"
- ],
- "tasks": [
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- },
- "shuttle-3-aesthetic": {
- "repo": "shuttleai/shuttle-3.1-aesthetic",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
- "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42"
- ],
- "layer_256": [
- "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
- "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a"
- ],
- "layer_b3": [
- "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
- "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c"
- ]
- },
- "shuttle-3-diffusion": {
- "repo": "shuttleai/shuttle-3-diffusion",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd",
- "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755",
- "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be",
- "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8"
- ],
- "layer_256": [
- "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
- "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
- "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c"
- ],
- "layer_b3": [
- "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
- "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
- "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd"
- ]
- },
- "shuttle-jaguar": {
- "repo": "shuttleai/shuttle-jaguar",
- "pkg": {
- "2": {
- "diffusers": "DiffusionPipeline",
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 4
- }
- }
- },
- "file_256": [
- "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
- "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f"
- ],
- "layer_b3": [
- "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
- "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672"
- ]
- }
- },
- "info.controlnet.flux1-canny-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Canny-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxControlPipeline"
- }
- }
- }
- },
- "info.controlnet.flux1-depth-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Depth-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxControlInpaintPipeline"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-controlnet-canny",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.controlnet.flux1-dev-controlnet-canny-alpha": {
- "*": {
- "repo": "InstantX/FLUX.1-dev-Controlnet-Canny-alpha",
- "pkg": {
- "0": {
- "diffusers": "FluxControlNetModel"
- }
- }
- }
- },
- "info.dit.flux1-fill-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Fill-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxFillPipeline"
- }
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-fill-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- }
- }
- },
- "info.dit.flux1-kontext-dev": {
- "*": {
- "repo": "black-forest-labs/FLUX.1-Kontext-dev",
- "pkg": {
- "0": {
- "diffusers": "FluxKontextInpaintPipeline"
- }
- },
- "tasks": [
- "FluxControlImg2ImgPipeline",
- "FluxControlInpaintPipeline",
- "FluxControlNetImg2ImgPipeline",
- "FluxControlNetInpaintPipeline",
- "FluxControlNetPipeline",
- "FluxControlPipeline",
- "FluxImg2ImgPipeline",
- "FluxInpaintPipeline",
- "FluxKontextPipeline",
- "FluxPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "flux1-kontext-dev"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.prx-512-t2i-sft": {
- "*": {
- "repo": "Photoroom/prx-512-t2i-sft",
- "pkg": {
- "0": {
- "diffusers": "PRXPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "PRXTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder": [
- "info.stst.t5gemma-prefixlm",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "prx-512-t2i-sft"
- ],
- "vae": [
- "AutoencoderKL",
- [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "NoneType"
- ]
- }
- }
- },
- "info.unet.audioldm-s-v2": {
- "*": {
- "repo": "cvssp/audioldm-s-full-v2",
- "pkg": {
- "0": {
- "diffusers": "AudioLDMPipeline"
- }
- },
- "file_256": [
- "fc30d5b5a3bb8d08672736efb1fff10755ba7024dace39b2dcb579a105aa2a5a"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clap-htsat-fused",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "vocoder": [
- "info.stst.speecht5-asr",
- "*"
- ]
- }
- }
- },
- "info.unet.audioldm2": {
- "*": {
- "repo": "cvssp/audioldm2",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "359a5ffb89a844beb2fcfac584aae2cd7cd6e87c3ab1ec4e892ef45d91db77c2"
- ],
- "layer_b3": [
- "eac241273f9f30982fc04aa88b4dc1c38b533430956a55b9ed4d3e5c717ec962"
- ],
- "layer_256": [
- "ab109d01b43788063802f00c6ecab024c830ea58d668f5c2df9e3ae5b87d86cb"
- ]
- }
- },
- "info.unet.blipdiffusion": {
- "*": {
- "repo": "Salesforce/blipdiffusion",
- "pkg": {
- "0": {
- "diffusers": "BlipDiffusionPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "blipdiffusion"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "scheduler": [
- "ops.scheduler.pndm",
- "scheduler"
- ],
- "qformer": [
- "info.vit.blip2-opt",
- "*"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chroma": {
- "*": {
- "repo": "lodestones/Chroma",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline"
- },
- "1": {
- "generation": {
- "neg_text": "",
- "num_steps": "28",
- "latent_size": [
- 64,
- 64
- ]
- }
- }
- },
- "file_256": [
- "53adcb3b6b6005758d40e2d8058b044ed4892bc8616efb7a62cc2dd384be07de",
- "2c41e8a9831f3be1eaff2c2ed590abb62e4534e814f7ec58a5fd74ff71dc2036",
- "0a7b2d9699dbd22b3744ee2692900cabcfb731a43dac13729c33807f2bb7c9f6",
- "6ddc9e2bbe3376ab5ee9f10b2d947f127b6bf6f879f06f316a2208bb0da357b8"
- ],
- "layer_b3": [
- "15e227ced8a89c41abaa9cc44f84dfffdf5ead0c626035e5a2dde2bbb0935479"
- ],
- "layer_256": [
- "a4daa6ff6f45ca70c738adb8c19bc3b6f228df931e6bf2a3394463e4dd7ec882"
- ],
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- },
- "chroma1-hd": {
- "repo": "lodestones/Chroma1-HD",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 40
- }
- }
- },
- "file_256": [
- "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546",
- "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7",
- "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc"
- ]
- },
- "chroma1-flash": {
- "repo": "lodestones/Chroma1-Flash",
- "pkg": {
- "0": {
- "diffusers": "ChromaPipeline",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.0,
- "num_images_per_prompt": 1
- }
- }
- },
- "file_256": [
- "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b",
- "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1",
- "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db"
- ],
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chroma1-hd": {
- "*": {
- "repo": "lodestones/Chroma1-HD",
- "pkg": {
- "0": {
- "diffusers": "ChromaImg2ImgPipeline"
- }
- },
- "tasks": [
- "ChromaPipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "chroma1-hd"
- ],
- "transformer": [
- "ChromaTransformer2DModel"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.cogvideox": {
- "*": {
- "repo": "zai-org/CogVideoX-2b",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_videos_per_prompt": 1,
- "num_inference_steps": 50,
- "num_frames": 49,
- "guidance_scale": 6
- }
- }
- },
- "file_256": [
- "8fbb6a5e67c70885a8ed8e33df144ac61253e45977be5035fa18cfdf77d386c7"
- ],
- "layer_b3": [
- "1db3439649b5362448455fb2ed6ebde0c3b973655a206832731149757ad165bb"
- ],
- "layer_256": [
- "edd6bd51f1236f528ff8d32dc754f0b86cfac901b800642ea497358156dc00bd"
- ]
- }
- },
- "info.controlnet.cogvideox-fun-v-pose": {
- "*": {
- "repo": "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXFunControlPipeline"
- }
- }
- }
- },
- "info.dit.cogvideox-i2v": {
- "*": {
- "repo": "zai-org/CogVideoX-5b-I2V",
- "pkg": {
- "0": {
- "diffusers": "CogVideoXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogvideox-i2v"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "info.vae.cogvideox",
- "cogvideox-i2v"
- ],
- "transformer": [
- "CogVideoXTransformer3DModel"
- ],
- "scheduler": [
- [
- "ops.scheduler.cogvideoxddim",
- "scheduler"
- ],
- [
- "ops.scheduler.cogvideoxdpm",
- "scheduler"
- ]
- ]
- }
- }
- },
- "info.dit.cogview3": {
- "*": {
- "repo": "zai-org/CogView3-Plus-3B",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "guidance_scale": 7.0,
- "num_images_per_prompt": 1,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- }
- }
- }
- },
- "info.dit.cogview4": {
- "*": {
- "repo": "zai-org/CogView4-6B",
- "pkg": {
- "0": {
- "diffusers": "CogView4Pipeline"
- }
- },
- "tasks": [
- "CogView4ControlPipeline",
- "CogView4Pipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "cogview4"
- ],
- "text_encoder": [
- "info.stst.glm-4-chat",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "CogView4Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.controlnet.cogview4-control": {
- "*": {
- "repo": "zai-org/CogView4-6B-Control",
- "pkg": {
- "0": {
- "diffusers": "CogView4ControlPipeline"
- }
- }
- }
- },
- "info.dit.pre-trianed": {
- "*": {
- "repo": "model_id, revision=\"diffusers/base/pre-trianed",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2_5_PredictBasePipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "pre-trianed"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-predict2-text2image": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Text2Image",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2TextToImagePipeline"
- }
- },
- "file_256": [
- "7fbd20dae97cc26a55c7aff3024bc84e554cff8f69966c725a24c8238c5431ec",
- "6d211f1c14cd793156da3a840dd5462ae072046fcd6f1dc64c613a5343bfe896",
- "95a2b32ad31a271eb64d35985c7ea46f1448528af70932eb1f35d57f90c27be2",
- "344e67faf333b7849fa94290c9028bdd5e40eb19700754c833cda0423bc10ad0",
- "ce15ef565cbb9ef414a6f7a396c455d82d5f762d2174493da87fe009c5fee75b",
- "94aa9f2b59330b88e97b6b439e2f206a51c86e6b154fb66d43ed149bfac23cf8",
- "636de5388da249130d51752991a1792b90af31cbf43f021ae07f75756ee2d79a",
- "472c5e4cf5056a1a59085addb5a86d801de39bf5e000d253f206a7f63c710029",
- "663266ace67c22529c3b6bfa0e8bd69f0ba6e683f5f02b8e3da50881057ba142",
- "21a674b314c1364d0dbb3712f5ed702996a7b7403c452835cac22709e01c2f77",
- "3bf2df806c6472e039efc9e8d3181163d7faa7b385e61519b7d17d5e9c993a49",
- "1de35e1603c4c30bc80b132ccea15fc0503369caf68290708f17e679e98cd41f",
- "0738e559bbd71f7351ccba34b2b47362a3f829b92f3dbcffeaf1e44b0d52f42c"
- ],
- "layer_b3": [
- "5a18ba14c41c6601dcc1195ca180ac7744357eb15ace39272788bda1a7151e9b",
- "67cc3eaf7987c89cd7ccff13de6bc03e3eec59d260d44486e2367cd946ce6f20",
- "3c6fefa107742488d2e6856714198a762f2fd35c67edd50d4657eaf4b59c7ca3",
- "4e1f90ee1e8959d334c9b1ea2cc5e58d0b8340e271c35f81c8a5ec26e16d9d76",
- "f8171071e828524fcc2806126ad100a2198e450c82c0864c8fe8b358c5cbbfbd",
- "8126101a0207ecfbd741394fd59f306bcb4c492b2a921e0921c426ca7bd38985",
- "c942c5a85ff7cb602d8ca894f5d180c2224e91f0b62c3a21f6a425f9e0e8554b",
- "c8c500de74da879a547875fe1046f62ab18bdfd09c09eb3da723cbc2319cb4e3",
- "c0ac3f67501004e9e9a55d1658402ad97e42bf8a266edf81f6f3bb835ee476b9",
- "84f5926eb4e11d826815682b076ed7d3bba4c86520859be80aa1ef92c72b26a4",
- "1d4375aab5548708559b0fde150754a2163cd211eb20a5471e17afaeeb26e082",
- "68bd8982f59c60d69c301d16dfb5a60f5d43d66c0b60138d48a22f5ded598e7b",
- "c3e9a10cad7aebf979072092008be6e2815d03d28cbf316c15e8daf22116bd7d"
- ],
- "layer_256": [
- "38f2a75eab667c0cc85f3946a23ca6dc2278438c25a9f93aaaa9f79c3808e180",
- "ee8434a5e9bc6fa07199de2d0c69fb87f7922c31792bafd13f527c9d92fecb0c",
- "2f8382657babb4d0ae4f8e425ae33b21ad71deb6ba457fd6734f05208d52e06a",
- "34b181a8291b571857cdbf67ac0081fea594a2f223bf20bd2fc8b0c889e9602d",
- "d198c412b972e381acfb812304fa98ed0d97a2f072ddc195cd9a1eb83b1d8146",
- "79580a13aff9859e67b0a9f4f8893236cdcfa58c3d43770641aaac8daee55a94",
- "cfd48c7ad71c913fa8768167ed0c2ee8c207311b22b1e5a8761369b5a780e8d6",
- "da91362ad85d4d2e80a2cb7a55e4ae0e52c9eef8b437a95894ce5ab75d36568c",
- "15f84001f5205b6dd8c6f1334cb51c46f6171c7795fb2a557ea16b874f0c71e5",
- "5d29179ad15a15d2561defcdda66f1d1e4d065c1e0738f9cba4db5b68b93d2ea",
- "7ec489d1e461f5fb2af627b68034ca57f19c516aeccbc5d188b3bd27e3353a15",
- "c8dc42fe7b411d746ebdf86286b91cd6893c5f028076b8fe4103f7ea8e1d8833",
- "86df7c095aee01588e961438f322b85ca0100a9e440b8a2b6c724e00f748d8b5"
- ],
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-text2image"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-predict2-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-Predict2-2B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "Cosmos2VideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-predict2-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-text2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosTextToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-text2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.dit.cosmos-1-diffusion-video2world": {
- "*": {
- "repo": "nvidia/Cosmos-1.0-Diffusion-7B-Video2World",
- "pkg": {
- "0": {
- "diffusers": "CosmosVideoToWorldPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "cosmos-1-diffusion-video2world"
- ],
- "transformer": [
- "CosmosTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "cosmos-1-diffusion-video2world"
- ],
- "scheduler": [
- "ops.scheduler.edmeuler",
- "scheduler"
- ],
- "safety_checker": [
- "CosmosSafetyChecker"
- ]
- }
- }
- },
- "info.unet.if-ii-l-v1": {
- "*": {
- "repo": "DeepFloyd/IF-II-L-v1.0",
- "pkg": {
- "0": {
- "diffusers": "IFSuperResolutionPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "if-ii-l-v1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ],
- "image_noising_scheduler": [
- "ops.scheduler.ddpm",
- "scheduler"
- ]
- }
- }
- },
- "info.dit.easyanimatev5-zh": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimatePipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- [
- "info.art.bert-uncased",
- "*"
- ]
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.controlnet.easyanimatev5-zh-control": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-Control-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateControlPipeline"
- }
- }
- }
- },
- "info.dit.easyanimatev5-zh-inp": {
- "diffusers": {
- "repo": "alibaba-pai/EasyAnimateV5.1-12b-zh-InP-diffusers",
- "pkg": {
- "0": {
- "diffusers": "EasyAnimateInpaintPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "easyanimatev5-zh"
- ],
- "text_encoder": [
- "Qwen2VLForConditionalGeneration",
- [
- "info.art.bert-uncased",
- "*"
- ]
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "easyanimatev5-zh-inp"
- ],
- "transformer": [
- "EasyAnimateTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.hidream-i1": {
- "*": {
- "repo": "HiDream-ai/HiDream-I1-Full",
- "pkg": {
- "0": {
- "diffusers": "HiDreamImagePipeline"
- }
- },
- "file_256": [
- "3cb3f6d77a3fce19b90fa7f66da0cbe997b0785a38a788b559290d3062f6fd26"
- ],
- "layer_b3": [
- "612eb9b2676a3e7b28b10aae045a97a95de2a399fe3801c8f6369589c3a832a6"
- ],
- "layer_256": [
- "78fbfb7fddb9ccbdf91f22b0c3d304cbf0cc7305dbccb216982233849ec727df"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_3": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_3": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "text_encoder_4": [
- "info.stst.llama-2-hf",
- "*"
- ],
- "tokenizer_4": [
- "info.encoder.tokenizer",
- "hidream-i1"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuandit-v1": {
- "diffusers": {
- "repo": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "extra_embedder",
- "model.blocks",
- "skip_norm.weight"
- ],
- "file_256": [
- "4fb84f84079cda457d171b3c6b15d1be95b5a3e5d9825703951a99ddf92d1787",
- "e01db5e129e8ca1117e9cf473fc5a2b096949f03ab90048aeabbc328de7ec800",
- "8af691cadb78047d55721259355d708e87ddbba1b7845df9377d9a5ae917b45d"
- ],
- "layer_b3": [
- "aead6b61b17ebc77c4c186a4b82c193f11ec267b20d909726422ee9852e2e0b2",
- "885a056b94f6f9844c0660be489844d63bb74cc13316f441d10968fff3dd3120",
- "390d951cbdda6e2cffb690031b60f02921624651534c2effaaa7d68ab476c700"
- ],
- "layer_256": [
- "d4842ce2b7f927203326b25ff4d6738ec9a8b95327f06791c387e4a351ed6ed0",
- "5af943f96f5dc9fecb1e92fe2b1fa17c94dd6947690201f4a5ee1a4a2721a68e",
- "4a1f2b8234fa4336e263842e042d42e8d64d8a4d3941d9c0c78366b50303950c"
- ]
- }
- },
- "info.dit.hunyuanvideo": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoPipeline"
- }
- },
- "file_256": [
- "bdb957b35585ea74ae42ca92865a68fa1bf1ebc6c5b7e686a889e5c977dc24c7"
- ],
- "layer_b3": [
- "d31c56b4c9444d4c2f1b10120fe964e0956f6b8c7e7c1e4cc5a1f37406fc49f5"
- ],
- "layer_256": [
- "fe741fdfd163bcb1e0ed81d80f79ac3576dbf6e6740674efadfeff782a48bed4"
- ],
- "pipe_names": {
- "text_encoder": [
- "info.stst.llama-2-hf",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-i2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-I2V",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideoImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.llava",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "transformer": [
- "HunyuanVideoTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-i2v"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-1-480p-t2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_t2v",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideo15Pipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-t2v"
- ],
- "transformer": [
- "HunyuanVideo15Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-t2v"
- ],
- "guider": [
- "ClassifierFreeGuidance"
- ]
- }
- }
- },
- "info.dit.hunyuanvideo-1-480p-i2v": {
- "*": {
- "repo": "hunyuanvideo-community/HunyuanVideo-1.5-480p_i2v",
- "pkg": {
- "0": {
- "diffusers": "HunyuanVideo15ImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-i2v"
- ],
- "transformer": [
- "HunyuanVideo15Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanvideo-1-480p-i2v"
- ],
- "guider": [
- "ClassifierFreeGuidance"
- ],
- "image_encoder": [
- "SiglipVisionModel"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanimage-2": {
- "diffusers": {
- "repo": "hunyuanvideo-community/HunyuanImage-2.1-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "HunyuanImagePipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanimage-2"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "hunyuanimage-2"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.hunyuanimage-2-refiner": {
- "diffusers": {
- "repo": "hunyuanvideo-community/HunyuanImage-2.1-Refiner-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "HunyuanImageRefinerPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "hunyuanimage-2-refiner"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.kandinsky-2-1": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-1-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyPriorPipeline"
- }
- },
- "tasks": [
- "Kandinsky3Img2ImgPipeline",
- "Kandinsky3Pipeline",
- "KandinskyCombinedPipeline",
- "KandinskyImg2ImgCombinedPipeline",
- "KandinskyImg2ImgPipeline",
- "KandinskyInpaintCombinedPipeline",
- "KandinskyInpaintPipeline",
- "KandinskyPipeline",
- "KandinskyV22CombinedPipeline",
- "KandinskyV22Img2ImgCombinedPipeline",
- "KandinskyV22Img2ImgPipeline",
- "KandinskyV22InpaintCombinedPipeline",
- "KandinskyV22InpaintPipeline",
- "KandinskyV22Pipeline"
- ],
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-1"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.kandinsky-2-2": {
- "prior": {
- "repo": "kandinsky-community/kandinsky-2-2-prior",
- "pkg": {
- "0": {
- "diffusers": "KandinskyV22PriorPipeline"
- }
- },
- "pipe_names": {
- "prior": [
- "PriorTransformer"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-2-2"
- ],
- "scheduler": [
- "ops.scheduler.unclip",
- "scheduler"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.latte-1": {
- "*": {
- "repo": "maxin-cn/Latte-1",
- "pkg": {
- "0": {
- "diffusers": "LattePipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "latte-1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "LatteTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.ltx-video": {
- "*": {
- "repo": "Lightricks/LTX-Video",
- "pkg": {
- "0": {
- "diffusers": "LTXImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.ltx-video-09": {
- "*": {
- "repo": "Lightricks/LTX-Video-0.9.5",
- "pkg": {
- "0": {
- "diffusers": "LTXConditionPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "ltx-video"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ltx-video-09"
- ],
- "transformer": [
- "LTXVideoTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.lumina-next-sft": {
- "diffusers": {
- "repo": "Alpha-VLLM/Lumina-Next-SFT-diffusers",
- "pkg": {
- "0": {
- "precision": " ops.precision.bfloat.B16"
- }
- },
- "identifiers": [
- "time_caption",
- "feed_forward"
- ],
- "file_256": [
- "371153b7c7b7a64899d4016970c7cc472039f9c9b21ebe073adf0b8525cdf1bd"
- ],
- "layer_b3": [
- "fa134efd6e9672e7de2965e4895fc58879bd0a6c4fdf9165c278f2748254675f",
- "4d960ec35c53f72f065b94b836bcd923ea6074d38ad49881061f315d62e3c839"
- ],
- "layer_256": [
- "3938a85568d9df186923edf04391d79e89e6199123bc175afb520e0948d1ae05",
- "c0ca51fdea051fcd042bf4b56d32e1e8bb9525a921f2e197f370f101e90527f0"
- ]
- }
- },
- "info.dit.lumina-image-2": {
- "*": {
- "repo": "Alpha-VLLM/Lumina-Image-2.0",
- "pkg": {
- "0": {
- "diffusers": "Lumina2Pipeline"
- }
- },
- "file_256": [
- "132b4d213fdd3cfc14333746fc3eb8bbe6358cd73c3bc95ac4ccec230b97dca3",
- "a7c09ebae62996a8289782161338a3cdba58c11d2d849c50b2d6502e152b0d6d"
- ],
- "layer_b3": [
- "198bde52f09736f1fc650dcdbd0e6b0f6a5ce186582554c1d9ee8ab16ac0feb2",
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa"
- ],
- "layer_256": [
- "982893c99860aac8198c2e435cf85f782fce8f10732daf1f2881a26864400a4e",
- "dc937b59892604f5a86ac96936cd7ff09e25f18ae6b758e8014a24c7fa039e91"
- ],
- "tasks": [
- "Lumina2Pipeline"
- ],
- "pipe_names": {
- "transformer": [
- "Lumina2Transformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "lumina-image-2"
- ]
- }
- },
- "illustrious-lumina-v3": {
- "repo": "OnomaAIResearch/Illustrious-Lumina-v0.03",
- "file_256": [
- "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8",
- "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214"
- ],
- "layer_256": [
- "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
- "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828"
- ],
- "layer_b3": [
- "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
- "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c"
- ]
- }
- },
- "info.dit.lucy-edit-dev": {
- "*": {
- "repo": "decart-ai/Lucy-Edit-Dev",
- "pkg": {
- "0": {
- "diffusers": "LucyEditPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "lucy-edit-dev"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.longcat-image": {
- "*": {
- "repo": "meituan-longcat/LongCat-Image",
- "pkg": {
- "0": {
- "diffusers": "LongCatImagePipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "longcat-image"
- ],
- "text_processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.longcat-image-edit": {
- "*": {
- "repo": "meituan-longcat/LongCat-Image-Edit",
- "pkg": {
- "0": {
- "diffusers": "LongCatImageEditPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "longcat-image-edit"
- ],
- "text_processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.mochi-1": {
- "*": {
- "repo": "genmo/mochi-1-preview",
- "pkg": {
- "0": {
- "diffusers": "MochiPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "mochi-1"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "mochi-1"
- ],
- "transformer": [
- "MochiTransformer3DModel"
- ]
- }
- }
- },
- "info.unet.musicldm": {
- "*": {
- "repo": "ucsd-reach/musicldm",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 200,
- "audio_length_in_s": 10.0
- }
- }
- },
- "file_256": [
- "853d0ef1d61cbf5d682872322ea8b761ba3d2f85bfbccd58363bd6b2f837268f"
- ],
- "layer_b3": [
- "82fbcc553c1ad770d28fd1866b935249c5ebfbf75f3166ae823e1bc6ef39a95a"
- ],
- "layer_256": [
- "d076446a58a36bf436e37444679d62bcf2f45689d4aa3d799b3fe801c71ed2c8"
- ]
- }
- },
- "info.dit.omnigen-v1": {
- "diffusers": {
- "repo": "Shitao/OmniGen-v1-diffusers",
- "pkg": {
- "0": {
- "diffusers": "OmniGenPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "OmniGenTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "omnigen-v1"
- ]
- }
- }
- },
- "info.dit.ovis-image": {
- "*": {
- "repo": "AIDC-AI/Ovis-Image-7B",
- "pkg": {
- "0": {
- "diffusers": "OvisImagePipeline"
- }
- },
- "tasks": [
- "OvisImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.stst.qwen3",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ovis-image"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.visualclozepipeline-384": {
- "*": {
- "repo": "VisualCloze/VisualClozePipeline-384",
- "pkg": {
- "0": {
- "diffusers": "VisualClozeGenerationPipeline"
- }
- },
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "text_encoder_2": [
- "info.stst.t5",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "visualclozepipeline-384"
- ],
- "transformer": [
- "FluxTransformer2DModel"
- ]
- }
- }
- },
- "info.lora.pia-condition-adapter": {
- "*": {
- "repo": "openmmlab/PIA-condition-adapter",
- "pkg": {
- "0": {
- "diffusers": "PIAPipeline"
- }
- }
- }
- },
- "info.dit.pixart-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtAlphaPipeline"
- }
- },
- "identifiers": [
- "aspect_ratio",
- "y_embedding",
- "emb.resolution",
- "caption_projection"
- ],
- "file_256": [
- "809a92d52a4a228f381a4b4f4b76051294b73285fb0cbb02f0ad24f9372217a8"
- ],
- "layer_b3": [
- "c5be83545ce9dbc564bcc9fd8fe4157d131347ccfc8f62adc877ec205b20acee"
- ],
- "layer_256": [
- "117225c0e91423746114b23d3e409708ad55c90ff52b21fa7a1c5105d2e935a5"
- ],
- "tasks": [
- "PixArtAlphaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-xl-2-1024-ms"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
- }
- },
- "info.dit.pixart-sigma-xl-2-1024-ms": {
- "*": {
- "repo": "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
- "pkg": {
- "0": {
- "diffusers": "PixArtSigmaPipeline"
- }
- },
- "identifiers": [
- "adaln_single",
- "scale_shift_table"
- ],
- "file_256": [
- "c34b520ef473329b945c2a21083cdf1337c5a468d23b3215b65576789bfd0305",
- "2fa4dee9229c02b03163f57bdb8e80c7a5ee364b7161796abe9c05e8dd13f239"
- ],
- "layer_b3": [
- "a199930ff537994872da77391955f0dd52eddd22ab9105388f0c5852f1b8021f",
- "ee6f980c32e98da6885f3e97d3f88d9158031e362cd3a49b20d1e23924b251e3"
- ],
- "layer_256": [
- "e0afd203aff5a1d192e325d0f59361373273d85d138b51768c3f10a75c154dc0",
- "987f3c2ff5d399191e5fd7dd7b1f1f285c197dc8124ad77f05cde7f2fb677a3c"
- ],
- "tasks": [
- "PixArtAlphaPipeline",
- "PixArtSigmaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "pixart-sigma-xl-2-1024-ms"
- ],
- "text_encoder": [
- "info.stst.t5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "PixArtTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ]
- }
- }
- },
- "info.dit.sana-1024px-bf16": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers",
- "pkg": {
- "0": {
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 4.5,
- "num_inference_steps": 20
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "b0b50c33be8758713459aa3c760feef6315d4bea31521fb5b8c3e8fdd9841ffe"
- ],
- "layer_b3": [
- "461e3d83dfa7e075ef21e2138ef153922ecfadde3db464b03dff92819f3e86dd"
- ],
- "layer_256": [
- "b928bbcc2ce99d55d21c189e2b1c57498bc313ef5b1457036e356107d567fc4e"
- ]
- }
- },
- "info.controlnet.sana-1024px-controlnet": {
- "diffusers": {
- "repo": "ishan24/Sana_600M_1024px_ControlNetPlus_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaControlNetPipeline"
- }
- }
- }
- },
- "info.dit.sana-sprint-1024px": {
- "diffusers": {
- "repo": "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaSprintPipeline"
- }
- },
- "tasks": [
- "SanaPAGPipeline",
- "SanaPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-sprint-1024px"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "vae": [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- "transformer": [
- "SanaTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.dpm",
- "multistep"
- ]
- }
- }
- },
- "info.dit.sana-video": {
- "*": {
- "repo": "Efficient-Large-Model/SANA-Video_2B_480p_diffusers",
- "pkg": {
- "0": {
- "diffusers": "SanaImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "sana-video"
- ],
- "text_encoder": [
- "info.stst.gemma2",
- "*"
- ],
- "vae": [
- [
- "info.vae.dc",
- "sana-1024px-bf16"
- ],
- [
- "info.vae.kl",
- "audioldm-s-v2"
- ]
- ],
- "transformer": [
- "SanaVideoTransformer3DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.unet.shap-e": {
- "*": {
- "repo": "openai/shap-e",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 64,
- "size": 256,
- "guidance_scale": 15
- }
- }
- }
- }
- },
- "info.dit.stable-audio-open-1": {
- "*": {
- "repo": "stabilityai/stable-audio-open-1.0",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "num_inference_steps": 200,
- "audio_end_in_s": 10,
- "num_waveforms_per_prompt": 3
- }
- }
- }
- }
- },
- "info.unet.stable-cascade": {
- "prior": {
- "repo": "stabilityai/stable-cascade-prior",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "negative_prompt": "",
- "num_images_per_prompt": 1,
- "num_inference_steps": 20,
- "guidance_scale": 4.0,
- "width": 1024,
- "height": 1024
- }
- }
- },
- "file_256": [
- "673b3173b037fb5f65b14fde37267390641a36726683de75dcf9df76fce2b866",
- "45c1eb5ce9b69efac891ad459b15c215cd90a986adbbfaf3effd3a89578cbcaf",
- "088ddf1e444abf399007b2da2bac87791df165c69f477994f6b3c745a20904b0",
- "39cec96c7212607f9e526db719bf1df507166d09f4748676c13b0d31cd4adb07",
- "31ffe2f1a3e2351d658fc7d3002a4eca22466a680f7fb3715b1e3768476f9633",
- "dfe24009fc881011f350d08d9d13be13a1a3b3cbfed667435efe0fd419aca099"
- ],
- "layer_b3": [
- "c55c83fa435ed128457f605bf1312e54727996d1c94413fc5ab5b49e9933857c",
- "6fb07ed9fc6ee636e50783802754b3a37bbecfc67037813b616223aeaf6fe877",
- "2ea194240e105c8962923e2baca88cb6a0c826794afc2ef82474301694711d68",
- "3412c8a184805621e4595d57268ced0b5c3c1974cd221bf67b2c908eec4fd61c",
- "53abfb013cfb0e41d0bc7b96bb83e42a4d4c67cb7325f9acf645b02d90efd8fe",
- "34556558f680c183adc2accd493cb9888a98ba853226bbecb07d95eb2055ff4f"
- ],
- "layer_256": [
- "4f5e0a738b963d3d4f8413387a0966ac1ce51f0f985bcbcc124fa221a2fff467",
- "8aa77e732a398b7d0dcd9a35d5682c2b5ab090ae90e915c7c91878abff0284d8",
- "4bbd46ded0916de3108f0da7145a80f5c7acea26ed35b0aaa29af12008352453",
- "415d1f3ecd06416708c1b83ab21e50b39c9d88d19dc33e60b977b7b7061880b9",
- "f678c32815c238e14091f690c8a83c3375c8f7738dc7abff79ff086ed9b59204",
- "17c8da803df7b9bbc8b1d7cc0c44916fea5b5ac0891330c4fdf0326fcd4496cb"
- ],
- "identifiers": [
- "down_blocks.0.2.kv_mapper",
- "previewer",
- "backbone"
- ]
- },
- "decoder": {
- "pkg": {
- "0": {
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 0.0,
- "output_type": "pil",
- "num_inference_steps": 10
- },
- "precision": "ops.precision.bfloat.B16"
- }
- },
- "file_256": [
- "fe92687deefcfb33bb3ec181254b55fe4e434c5084ce9d38815eaa32487ad376",
- "2c8d58b267678aecfa6705a0a0375c88613065a8a8d32ad3a4c3867f5461cb3a",
- "6c218dc948575e3b14b03dffe2014d7870ac505005770ce3abdc28e920a03c05",
- "a6c3d534a9be308e95d2c3224af94a854bebd9b503f620f1ae3c8e6ba4a341bf",
- "7b431ea7d0f10e72b3eaece353bf6bf2f6bc717b6f4207411be186b40dec1f43"
- ],
- "layer_b3": [
- "9506d989de0226018de214f7ced4670eb5aad4a0c399a9229488ceccdf9a3ceb",
- "6c09dcb83e0cd7ad735eb763c5e3721c579d796853f0b9d31ba74fb13cad4f94",
- "e07025965cee925e31f1d617ea8baa575e7db910d40cc0482fd83df317c0812b",
- "d9a42e4226fb2778aaeaf0d6bda173a4ff95aa574c6d9e27e41542aa469e40a3",
- "8dcd87dc7a9b877e8e2a00abac44c4da9eadf2b8df4ae68f27415bb791381a96"
- ],
- "layer_256": [
- "630ec0f3adf97145316c034139836f9df952060d0237ac4e478c55d9a3a50bc8",
- "80904f707c192ddd06be2cebeb2ebbec3eb0e9c99076d50824d391ef3ac67bf2",
- "8ccedbe1e8cc4093f05b5f8d90e6103e688ae1ac71e0d6261fb17c42ff7c25e4",
- "3524e7fa9ca6f7ef695bc2d3410934eabd5272946a05c8cacd7f329e0bd9f1dd",
- "40499a8f45ae28558ed2fe4fc549a4cb469bd237434b331ccc0b1910310ed733"
- ],
- "identifiers": [
- "0.2.channelwise",
- "clip_mapper.bias",
- ".12.self_attn.k_proj.weight"
- ]
- }
- },
- "info.dit.auraflow": {
- "*": {
- "repo": "fal/AuraFlow",
- "pkg": {
- "0": {
- "diffusers": "AuraFlowPipeline"
- }
- },
- "identifiers": [
- [
- 8192,
- 3072
- ],
- "mlpX.c_fc2.weight",
- "joint_transformer_blocks.2.ff_context.linear_2.weight"
- ],
- "file_256": [
- "ce3e475246258b94ee9dcb8b83292cb34edfffc2bbde46c74604d9c6cd7c585c",
- "526be97cf581c89ad87c6b19c1f7c2378851137698f7ec436596d061a382d37b",
- "6a40b011f287452dbca80face78e667055904c5ad97eb2097ade3200259b2203",
- "05e5493018333d947bb5940083dbc2f071093027ff414bc5b1b1229e4836e5cb"
- ],
- "layer_b3": [
- "cc6d383576c35a9709798d2e2b9e3eb31ba8c608040cf3712bc37871cfd14e21",
- "ddd54c44fa28fbddecf7cfae91cfa04917fd2f2fa94fc78c528cef2356a4ec3a",
- "90c694e7d1e20e6da49b571e9954338d384775419790be315304103227b1051b",
- "9e85aec1bdb616f52f88c80ddc7ab1eae8c16c0b5fbfcdb61a71ac02c325003d"
- ],
- "layer_256": [
- "3c13e6a965d03a49227d8b1606ba6a343a23772d8768407cc78d4ddb9102bc80",
- "b356cc84a23bc93bda4cc0fce1d0ba1b8e3d5a521e659ffc72e9e4a2d2c7f204",
- "270df7317fe01abf06333acbbd4f15f8fc7a7c56053219f42efb598454a3af24",
- "7ab6aa4514dd09f3cf589587d51a81734193ce45dd51bda9db0bd62fe48ef7d5"
- ],
- "tasks": [
- "AuraFlowPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "auraflow"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "transformer": [
- "AuraFlowTransformer2DModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.stable-diffusion-3": {
- "*": {
- "repo": "stabilityai/stable-diffusion-3.5-medium",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16"
- }
- },
- "identifiers": [
- "model.diffusion_model.joint_blocks.",
- "transformer_blocks.21.norm1_context.linear.weight",
- "transformer_blocks.31.norm1_context.linear.weight",
- "blocks.11.ff.net.2.weight"
- ],
- "file_256": [
- "ffef7a279d9134626e6ce0d494fba84fc1c7e720b3c7df2d19a09dc3796d8f93",
- "11fe06e22364b823dfeedc275912336b932b32a293a0b2f35ffac071990cc4de"
- ],
- "layer_b3": [
- "e411016545785046810b29cc3999f40bc6392be134a1318386c6f1c48f98726a",
- "a81e07ee67bc627e8b3c5e292ec1ca239009517a2106e8249d670ced0a88f746"
- ],
- "layer_256": [
- "13c982a6dc82d21c9f459e837d8c6f6d4696fd6e7e7b5783bdd2250b1f4fec61",
- "6ee79050373337bf63ac20916596df778bb22022bb38af986128a7459eda1463"
- ]
- },
- "stable-diffusion-3-turbo": {
- "repo": "tensorart/stable-diffusion-3.5-medium-turbo",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.5,
- "height": 1024,
- "width": 768
- }
- }
- },
- "file_256": [
- "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246",
- "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d",
- "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b",
- "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e"
- ],
- "layer_256": [
- "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ],
- "layer_b3": [
- "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6"
- ]
- }
- },
- "info.unet.gligen-1-4-inpainting-text-box": {
- "*": {
- "repo": "masterful/gligen-1-4-inpainting-text-box",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENPipeline"
- }
- }
- }
- },
- "info.unet.gligen-inpainting-text-image": {
- "*": {
- "repo": "anhnct/Gligen_Inpainting_Text_Image",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionGLIGENTextImagePipeline"
- }
- }
- }
- },
- "info.unet.stable-video-diffusion-img2vid-xt": {
- "*": {
- "repo": "stabilityai/stable-video-diffusion-img2vid-xt",
- "pkg": {
- "0": {
- "diffusers": "StableVideoDiffusionPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "unet": [
- "UNetSpatioTemporalConditionModel"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.ldm3d-4c": {
- "*": {
- "repo": "Intel/ldm3d-4c",
- "pkg": {
- "0": {
- "diffusers": "StableDiffusionLDM3DPipeline"
- }
- },
- "tasks": [
- "StableDiffusion3ControlNetInpaintingPipeline",
- "StableDiffusion3ControlNetPipeline",
- "StableDiffusion3Img2ImgPipeline",
- "StableDiffusion3InpaintPipeline",
- "StableDiffusion3PAGImg2ImgPipeline",
- "StableDiffusion3PAGPipeline",
- "StableDiffusion3Pipeline",
- "StableDiffusionControlNetImg2ImgPipeline",
- "StableDiffusionControlNetInpaintPipeline",
- "StableDiffusionControlNetPAGInpaintPipeline",
- "StableDiffusionControlNetPAGPipeline",
- "StableDiffusionControlNetPipeline",
- "StableDiffusionImg2ImgPipeline",
- "StableDiffusionInpaintPipeline",
- "StableDiffusionPAGImg2ImgPipeline",
- "StableDiffusionPAGInpaintPipeline",
- "StableDiffusionPAGPipeline",
- "StableDiffusionPipeline",
- "StableDiffusionXLControlNetImg2ImgPipeline",
- "StableDiffusionXLControlNetInpaintPipeline",
- "StableDiffusionXLControlNetPAGImg2ImgPipeline",
- "StableDiffusionXLControlNetPAGPipeline",
- "StableDiffusionXLControlNetPipeline",
- "StableDiffusionXLControlNetUnionImg2ImgPipeline",
- "StableDiffusionXLControlNetUnionInpaintPipeline",
- "StableDiffusionXLControlNetUnionPipeline",
- "StableDiffusionXLImg2ImgPipeline",
- "StableDiffusionXLInpaintPipeline",
- "StableDiffusionXLPAGImg2ImgPipeline",
- "StableDiffusionXLPAGInpaintPipeline",
- "StableDiffusionXLPAGPipeline",
- "StableDiffusionXLPipeline"
- ],
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "ldm3d-4c"
- ],
- "scheduler": [
- "ops.scheduler.karrasdiffusion",
- "schedulers"
- ],
- "safety_checker": [
- "StableDiffusionSafetyChecker"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.unet.i2vgen-xl": {
- "*": {
- "repo": "ali-vilab/i2vgen-xl",
- "pkg": {
- "0": {
- "diffusers": "I2VGenXLPipeline"
- }
- },
- "pipe_names": {
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "i2vgen-xl"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "feature_extractor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "unet": [
- "I2VGenXLUNet"
- ],
- "scheduler": [
- "ops.scheduler.ddim",
- "scheduler"
- ]
- }
- }
- },
- "info.unet.wuerstchen": {
- "prior": {
- "repo": "warp-ai/wuerstchen-prior",
- "pkg": {
- "0": {
- "diffusers": "WuerstchenPriorPipeline"
- }
- },
- "tasks": [
- "WuerstchenCombinedPipeline",
- "WuerstchenDecoderPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wuerstchen"
- ],
- "text_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "prior": [
- "WuerstchenPrior"
- ],
- "scheduler": [
- "ops.scheduler.ddpmwuerstchen",
- "scheduler"
- ]
- }
- }
- },
- "info.dit.wan2-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-14B-Diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "299e6304544f2783896372fa919e755a8bb9ab8caf898ce08a678dae391e1179",
- "a9278e6e9c82d174e6c67b3c97d8b97fef30af51dcf59160f2fc241f6819f5dc",
- "be531024cd9018cb5b48c40cfbb6a6191645b1c792eb8bf4f8c1c6e10f924dc5",
- "6f999b0d6cb9a72b3d98ac386ed96f57f8cecae13994a69232514ea4974ad5fd",
- "2e39adde59c5e0e90edbb35873126b0d67928b5c11c501e384e976d6dc597cce",
- "2ee88ab18d7ed7691c5b7f8bdc3d0a9815e6efe75499287564830fd209d3cdfb",
- "46c27d3693bf2475990a912e08bf67fc6e6cd5396eab87b5e8dd1fcd3651364a",
- "193535c6450045f718df5f011de6d94d49bd9b13f37ca0412500f050dbbb01a8"
- ],
- "layer_b3": [
- "32266d1c79b518adb9d21837e6a427f6ae55b68cfdd673a7dadb38820fddeb48",
- "3b6989856f4f05368524c1852d8660b73c84cfbe44460af017d7139c2a4641b8",
- "f4d6cee3c112db93b3c9137ad102ec0e79ec7ab68b9bbc59004fbc268ccd5ddb",
- "e627144f41055619eb5407699c46e69ac0d87cf8873721e3e48c9e842656abf8",
- "6c00f3fadedacb841c4b9b4321b94a11ef85a08c9dd9253e5f9ba95856715579",
- "a0c339253c714b05877c8fbab649ed631cf021930978f3696a46f685a07c9092",
- "6435da89a870fd0e88680d31de75b9a40c408a4768eff384ce9b9e99481e8e66"
- ],
- "layer_256": [
- "52493c23c5fc1d087a283bc4eabb151421b7ae09affa12a5bb059d62656c5766",
- "058dedb3d2683a9a5b671c6302690e22722c93f6ed92281d5fa74ab190e632a1",
- "5fbed4b95e7196d3626003ea9e0fbbffd074b4297ca406e01b5b6c5d881a6080",
- "3a2335c8e7a4359c071b50333b5c00eef6f42a1d5206915e2ee99464a8c5eae7",
- "0542780670dd75d4cd9deda123d2e150730646c0a1a8d34582460991498a77a6",
- "e925b8222774905c8fbf10af77811fde7870e563eedcde2c94bd5c727e952d49",
- "3d915854976284347efa7aa0a117c0fc3b415c4208e1a6c94beb4ccb9720743d"
- ]
- }
- },
- "info.dit.wan-animate": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.2-Animate-14B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanAnimatePipeline"
- }
- },
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan-animate"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "WanAnimateTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan2-i2v-480p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-I2V-14B-480P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanImageToVideoPipeline"
- }
- },
- "file_256": [
- "b4602c35fa0519750a42c03e3f296c02d542291e344c4d702522cddbd1711f13",
- "6d7a34b63b70eb608324e546d979167a5e787ac6bca3528e63f54a11572d66aa",
- "b2051cd29d6b2f0c924fa7a3e78a4772f0134d7b059f21590dcce416f4f6cbe8",
- "7664fe075b3c82dcecf89012ad3429eee41ee9f10d476f60bc2d2ae3c4ca986c",
- "8ef7ea5bf9eea636b9b3ebd84c40671b4a18ae2704cb4c8595cb5b25c1d8e8b9",
- "b2de21b99b2e72cb0ff15253b07e926f26e7cf1b7e229efc32f94ad1f1ed9395",
- "0ca75338e7a47ca7cacddb7e626647e65829c497387f718ecb6ea0bae456944a",
- "c058a4ac5363c35d1ab4dd3bdec788c23b267fa42a0d7c68aba599f2f74600c9",
- "27988f6b510eb8d5fdd7485671b54897f8683f2bba7a772c5671be21d3491253"
- ],
- "layer_b3": [
- "4b6c3354c9ee5694e00a78f5658fdf14129f159c3b78a57f82fb18e0f265a83d",
- "c36c783559a40d22504f6c4bfb4f5aae760f3f46bbb3a595be79880935122175",
- "ac62f7d5583fd2e85b738fafaf233e2cde6e2857e04351135bb9ded45f9082ce",
- "215e89e855b5e9456af9aa68bc67567dc2269002aaa6b01d849ffec425fc628d",
- "324b8b6c2d512547a2c31bafa12e20acf313fd3aad587b293334f9f629edeec6"
- ],
- "layer_256": [
- "137881dad8c00063bc8bf05f93067736e419173cd171acc22f77b730db688a19",
- "8c5952fd3d333d3a4b719bf7d8ce6b12d1d2e78caaa7e42d713788cfdcadd244",
- "86c58bc4864c97f394ea6bccb2ecedc4aab7166f5b9bfeb313edfdcb2918164a",
- "cac45f7d8f1a0628cb0738bd308689e439b1cc6206e5f887d60d5b37d30138f2",
- "60e4f71a0961b1346b6f6b5ebe4c8cc93219239c5e13b4c0f1e19e9b8e1324d5"
- ],
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan2-i2v-480p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.wan",
- "wan2-i2v-480p"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "transformer": [
- "WanTransformer3DModel"
- ],
- "transformer_2": [
- "WanTransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan21-vace": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-VACE-1.3B-diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanVACEPipeline"
- }
- },
- "file_256": [
- "bd8bbb8834a274525ab65cbb063f21aa58973a054bfd1638bfe395504c9d9b99",
- "192804a4e10b5bb0a13f5c224bc4ec9707b3b8cc0def8eea005dbce7c9d6752a",
- "f202a5c59b8a91ada1862c46a038214f1f7f216c61ec8350d25f69b919da4307",
- "654693bf2a93a27cd67c3bcee238bc1d0cbb0dd9a74928ed7155fb21a2a1900a",
- "640ccc0577e6a5d4bb15cd91b11b699ef914fc55f126c5a1c544e152130784f2"
- ],
- "layer_b3": [
- "5357d78799a61cd2d72a8a2824c919d63f718eb3fba624af63689e9c657db032",
- "7ae67b7ccf79d1c3f4531ae138e1eb63d52dd97a66b3fcbe1d68fded8df4d5b1",
- "ee63ecdfb3da6901853a59ec950f3e7c3f6595ac46347a03881a4a9c71425377",
- "82762df3539021d3c0342e0da04137ddbe95ef37ea933cd0a68c09c2c650f2ac"
- ],
- "layer_256": [
- "2684413479030170fb3f08c1069c02957ffc386a59168d23b55d579d5c675269",
- "d527680fa735e5f30ef8852aabf8a49f02a094bc4718f0787c5b85710a13c026",
- "9677492a107b3ed827c7285db3393f5321d451cc6d922a4d0488d2a67e939446",
- "aaef66a4f65ecf852888d160b2122753fe4c6d642b5d41db29e4ce9e6855b5a0"
- ],
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-vace"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "transformer": [
- "WanVACETransformer3DModel"
- ],
- "transformer_2": [
- "WanVACETransformer3DModel"
- ]
- }
- }
- },
- "info.dit.wan21-t2v": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-T2V-1.3B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "WanPipeline",
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 480,
- "width": 832,
- "num_frames": 81,
- "guidance_scale": 5.0
- }
- }
- },
- "tasks": [
- "WanImageToVideoPipeline",
- "WanPipeline",
- "WanVideoToVideoPipeline"
- ],
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "wan21-t2v"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-t2v-lite-sft-5s": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-T2V-Lite-sft-5s-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5T2VPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2v-lite-sft-5s"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2v-lite-sft-5s"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-i2i-lite-sft": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-I2I-Lite-sft-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5I2IPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2i-lite-sft"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2i-lite-sft"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-i2v-sft-5s": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-I2V-Pro-sft-5s-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5I2VPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "hunyuanvideo-i2v"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2v-sft-5s"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-i2v-sft-5s"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.kandinsky-5-t2i-lite-sft": {
- "diffusers": {
- "repo": "kandinskylab/Kandinsky-5.0-T2I-Lite-sft-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "Kandinsky5T2IPipeline"
- }
- },
- "pipe_names": {
- "transformer": [
- "Kandinsky5Transformer3DModel"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2i-lite-sft"
- ],
- "text_encoder_2": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "tokenizer_2": [
- "info.encoder.tokenizer",
- "kandinsky-5-t2i-lite-sft"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.dit.z-image-turbo": {
- "*": {
- "repo": "Z-a-o/Z-Image-Turbo",
- "pkg": {
- "0": {
- "diffusers": "ZImageOmniPipeline"
- }
- },
- "tasks": [
- "ZImageControlNetInpaintPipeline",
- "ZImageControlNetPipeline",
- "ZImageImg2ImgPipeline",
- "ZImageOmniPipeline",
- "ZImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "AutoencoderKL"
- ],
- "text_encoder": [
- "PreTrainedModel"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "z-image-turbo"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "siglip": [
- "info.vit.siglip2-patch16-224",
- "*"
- ],
- "siglip_processor": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.controlnet.z-image-turbo": {
- "*": {
- "repo": "Tongyi-MAI/Z-Image-Turbo",
- "pkg": {
- "0": {
- "diffusers": "ZImageControlNetInpaintPipeline"
- }
- }
- }
- },
- "info.dit.skyreels-v2-t2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-T2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2Pipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-t2v-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-df-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-DF-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2DiffusionForcingVideoToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-df-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.skyreels-v2-i2v-720p": {
- "diffusers": {
- "repo": "Skywork/SkyReels-V2-I2V-14B-720P-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "SkyReelsV2ImageToVideoPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "skyreels-v2-i2v-720p"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_processor": [
- "CLIPProcessor"
- ],
- "transformer": [
- "SkyReelsV2Transformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.unipc",
- "multistep"
- ]
- }
- }
- },
- "info.dit.qwen-image": {
- "*": {
- "repo": "Qwen/Qwen-Image",
- "pkg": {
- "0": {
- "diffusers": "QwenImageInpaintPipeline"
- }
- },
- "file_256": [
- "9f33a59093af3abcc2836d4cf4b7bd122c238ca70a26c70f34fdde64646b3bcd"
- ],
- "layer_b3": [
- "c87eedda853c12844a8deb3592a90bbcbd4dff2f7a850c28755e4aa171432150"
- ],
- "layer_256": [
- "fda2472d8ef6587a4c979021a2390eeb7c8fc2bcf565330ab8dc6b22f5348ec9"
- ],
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.controlnet.qwen-image-controlnet-union": {
- "*": {
- "repo": "InstantX/Qwen-Image-ControlNet-Union",
- "pkg": {
- "0": {
- "diffusers": "QwenImageControlNetModel"
- }
- }
- }
- },
- "info.controlnet.qwen-image-controlnet-inpainting": {
- "*": {
- "repo": "InstantX/Qwen-Image-ControlNet-Inpainting",
- "pkg": {
- "0": {
- "diffusers": "QwenImageControlNetModel"
- }
- }
- }
- },
- "info.dit.qwen-image-edit": {
- "*": {
- "repo": "Qwen/Qwen-Image-Edit",
- "pkg": {
- "0": {
- "diffusers": "QwenImageEditInpaintPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-edit"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.qwen-image-edit-2509": {
- "*": {
- "repo": "Qwen/Qwen-Image-Edit-2509",
- "pkg": {
- "0": {
- "diffusers": "QwenImageEditPlusPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-edit-2509"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.qwen-image-layered": {
- "*": {
- "repo": "Qwen/Qwen-Image-Layered",
- "pkg": {
- "0": {
- "diffusers": "QwenImageLayeredPipeline"
- }
- },
- "tasks": [
- "QwenImageControlNetPipeline",
- "QwenImageEditInpaintPipeline",
- "QwenImageEditPipeline",
- "QwenImageEditPlusPipeline",
- "QwenImageImg2ImgPipeline",
- "QwenImageInpaintPipeline",
- "QwenImagePipeline"
- ],
- "pipe_names": {
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ],
- "vae": [
- "info.vae.kl",
- "qwen-image"
- ],
- "text_encoder": [
- "info.vit.qwen2-vl",
- "*"
- ],
- "tokenizer": [
- "info.encoder.tokenizer",
- "qwen-image-layered"
- ],
- "processor": [
- "Qwen2VLProcessor"
- ],
- "transformer": [
- "info.dit.flux1-schnell",
- "*"
- ]
- }
- }
- },
- "info.dit.chronoedit": {
- "diffusers": {
- "repo": "nvidia/ChronoEdit-14B-Diffusers",
- "pkg": {
- "0": {
- "diffusers": "ChronoEditPipeline"
- }
- },
- "pipe_names": {
- "tokenizer": [
- "info.encoder.tokenizer",
- "chronoedit"
- ],
- "text_encoder": [
- "info.stst.mt5",
- "*"
- ],
- "image_encoder": [
- "info.vit.clip-vit-patch32",
- "*"
- ],
- "image_processor": [
- "info.dit.flux1-schnell",
- "*"
- ],
- "transformer": [
- "ChronoEditTransformer3DModel"
- ],
- "vae": [
- "info.vae.kl",
- "audioldm-s-v2"
- ],
- "scheduler": [
- "ops.scheduler.euler",
- "discrete"
- ]
- }
- }
- },
- "info.unet.kolors": {
- "diffusers": {
- "repo": "Kwai-Kolors/Kolors-diffusers",
- "pkg": {
- "0": {
- "precision": "ops.precision.float.F16",
- "generation": {
- "negative_prompt": "",
- "guidance_scale": 5.0,
- "num_inference_steps": 50,
- "width": 1024,
- "height": 1024
- }
- },
- "1": {
- "diffusers": "DiffusionPipeline"
- }
- },
- "file_256": [
- "425ff1dcbe3a70ac13d3afdd69bd4e3176b0c3260722527c80b210f11d2d966c"
- ],
- "layer_b3": [
- "6eb15506fa38b4cbb26391ab1b6c9ead05f86c711e46583bfbe8fc4421571414"
- ],
- "layer_256": [
- "04e3c17170b8a200481f6941b370fdc5056a00fe5a16956de01790f8a93c0dcd"
- ],
- "identifiers": [
- ".DenseReluDense.wi.weight",
- "encoder_hid_proj.weight"
- ],
- "pipe_names": {}
- }
- },
- "info.moe.trinity": {
- "*": {
- "repo": "arcee-ai/Trinity-Mini",
- "pkg": {
- "0": {
- "transformers": "AfmoeModel"
- }
- },
- "tasks": [
- "AfmoeForCausalLM",
- "AfmoeModel",
- "AfmoePreTrainedModel"
- ]
- }
- },
- "info.encoder.tokenizer": {
- "aimv2-patch14-224-lit": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "albert-xx-v2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "align": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "afm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "aria": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "audio-flamingo-3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "aya-vision": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
- }
- }
- },
- "bark": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "bart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "bert-for-seq-generation-l-24-bbc-encoder": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert_generation.tokenization_bert_generation.BertGenerationTokenizer"
- }
- }
- },
- "bigbird-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.big_bird.tokenization_big_bird.BigBirdTokenizer"
- }
- }
- },
- "bigbird-pegasus-arxiv": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "biogpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.biogpt.tokenization_biogpt.BioGptTokenizer"
- }
- }
- },
- "bitnet-b18-4t": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "blenderbot": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.blenderbot_small.tokenization_blenderbot_small.BlenderbotSmallTokenizer"
- }
- }
- },
- "blip-vqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "blip2-opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "bloom": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "blt": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "bridgetower": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "bros-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "camembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.camembert.tokenization_camembert.CamembertTokenizer"
- }
- }
- },
- "canine-s": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.canine.tokenization_canine.CanineTokenizer"
- }
- }
- },
- "chameleon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "chinese-clip-vit-patch16": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "clap-htsat-fused": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "clip-vit-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "clipseg-rd64": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "clvp-dev": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clvp.tokenization_clvp.ClvpTokenizer"
- }
- }
- },
- "llama-2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "codegen-mono": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "c4ai-command-r-v01": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cohere.tokenization_cohere.CohereTokenizer"
- }
- }
- },
- "conv-bert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "cpm-ant": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.cpmant.tokenization_cpmant.CpmAntTokenizer"
- }
- }
- },
- "csm": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "ctrl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.ctrl.tokenization_ctrl.CTRLTokenizer"
- }
- }
- },
- "data2vec-audio-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "data2vec-text": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "dbrx": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "deberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta.tokenization_deberta.DebertaTokenizer"
- }
- }
- },
- "deberta-v2-x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.deberta_v2.tokenization_deberta_v2.DebertaV2Tokenizer"
- }
- }
- },
- "deepseek-v2-lite": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "deepseek-v3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "deepseek-vl-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "dia": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.dia.tokenization_dia.DiaTokenizer"
- }
- }
- },
- "diffllama-handcut": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "distilbert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "dpr-ctx-encoder-single-nq": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.dpr.tokenization_dpr_fast.DPRQuestionEncoderTokenizerFast"
- }
- }
- },
- "electra-discriminator": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "emu3-chat-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "ernie-3-zh": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "ernie-45-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "ernie-4-a-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "ernie-4-vl-a-pt": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "esm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.esm.tokenization_esm.EsmTokenizer"
- }
- }
- },
- "exaone-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "falcon": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "falcon-mamba": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "flaubert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.flaubert.tokenization_flaubert.FlaubertTokenizer"
- }
- }
- },
- "flava": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "flexolmo-7x-1t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "florence-2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "fnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fnet.tokenization_fnet.FNetTokenizer"
- }
- }
- },
- "wmt19-en-ru": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.fsmt.tokenization_fsmt.FSMTTokenizer"
- }
- }
- },
- "funnel": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.funnel.tokenization_funnel.FunnelTokenizer"
- }
- }
- },
- "fuyu": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma-3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma3-text": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "gemma-3n-e": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "git": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "glm-4-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4-0414": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4v-thinking": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-4v": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "glm-asr-nano-2512": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "got-ocr-2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gpt2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-bigcode-santacoder": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-neo": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "gpt-neox": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "gpt-neox-japanese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese.GPTNeoXJapaneseTokenizer"
- }
- }
- },
- "gpt-oss": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "gpt-j": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "granite": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "powermoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "granite-4-h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "moe-active-shared-experts": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "grounding-dino": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "groupvit-gcc-yfcc": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "helium": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "hubert-ls960": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "ibert-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "idefics": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "idefics2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "idefics3-llama3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "blip-flan-t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "internvl3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "jais-2-chat": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "jamba-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "janus": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "jetmoe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "kosmos-2-patch14-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "kosmos-2": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "todo": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.parakeet.tokenization_parakeet_fast.ParakeetTokenizerFast"
- }
- }
- },
- "layoutlm-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "layoutlmv2-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv2.tokenization_layoutlmv2.LayoutLMv2Tokenizer"
- }
- }
- },
- "layoutlmv3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.layoutlmv3.tokenization_layoutlmv3.LayoutLMv3Tokenizer"
- }
- }
- },
- "led-16384": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "lfm2-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "lilt-roberta-en": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "llama-4-scout-16e": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-v1-mistral-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-next-video-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llava-onevision-qwen2-ov-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "longformer-4096": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "long-t5-local": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "luke": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.luke.tokenization_luke.LukeTokenizer"
- }
- }
- },
- "lxmert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "m": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.m2m_100.tokenization_m2m_100.M2M100Tokenizer"
- }
- }
- },
- "mamba": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "mamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "opus-mt-en-de": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.marian.tokenization_marian.MarianTokenizer"
- }
- }
- },
- "mbart-cc25": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mbart.tokenization_mbart.MBartTokenizer"
- }
- }
- },
- "megatron-bert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "metaclip-2-worldwide-huge-quickgelu": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "mgp-str": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mgp_str.tokenization_mgp_str.MgpstrTokenizer"
- }
- }
- },
- "max-text-01-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "stral-3-2512": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mistral-v0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mistral-3-2503": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mixtral-8x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "llama-3-vision": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "mm-grounding-dino-o365v1-goldg-v3det": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "mobilebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "modernbert": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "moonshine": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "hf-moshiko": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "mpnet": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.mpnet.tokenization_mpnet.MPNetTokenizer"
- }
- }
- },
- "mpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "mra-512-4": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "mt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "musicgen": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "musicgen-melody": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "mvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "nemotron-3-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "nllb-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.nllb.tokenization_nllb.NllbTokenizer"
- }
- }
- },
- "nystromformer-512": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "olmo-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "olmo2-1124-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "olmo-3-0725": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "olmoe-0924": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "omdet-turbo-swin-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "oneformer-ade-swin": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "openai-gpt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.openai.tokenization_openai.OpenAIGPTTokenizer"
- }
- }
- },
- "opt": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "ovis2-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "owlv2-patch16": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "owlvit-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "paligemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "pegasus": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "pegasus-x": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.pegasus.tokenization_pegasus.PegasusTokenizer"
- }
- }
- },
- "language-perceiver": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.perceiver.tokenization_perceiver.PerceiverTokenizer"
- }
- }
- },
- "persimmon": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "phi-1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "phi-3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "phi-3-moe": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "pixtral": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "plbart": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.plbart.tokenization_plbart.PLBartTokenizer"
- }
- }
- },
- "phetnet-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.prophetnet.tokenization_prophetnet.ProphetNetTokenizer"
- }
- }
- },
- "qwen2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen2-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen15-moe-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-next-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-vl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "qwen3-vl-a": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.qwen2.tokenization_qwen2.Qwen2Tokenizer"
- }
- }
- },
- "recurrentgemma": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "reformer-crime-and-punishment": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.reformer.tokenization_reformer.ReformerTokenizer"
- }
- }
- },
- "rembert": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.rembert.tokenization_rembert.RemBertTokenizer"
- }
- }
- },
- "roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "efficient-mlm-m0-0": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roberta.tokenization_roberta.RobertaTokenizer"
- }
- }
- },
- "roc-bert-zh": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roc_bert.tokenization_roc_bert.RoCBertTokenizer"
- }
- }
- },
- "roformer-chinese": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.roformer.tokenization_roformer.RoFormerTokenizer"
- }
- }
- },
- "rwkv-4-pile": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "hf-seamless-m4t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
- }
- }
- },
- "seamless-m4t-v2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.seamless_m4t.tokenization_seamless_m4t.SeamlessM4TTokenizer"
- }
- }
- },
- "siglip-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.siglip.tokenization_siglip.SiglipTokenizer"
- }
- }
- },
- "siglip2-patch16-224": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "smollm3": {
- "pkg": {
- "0": {
- "transformers": "transformers.tokenization_utils_tokenizers.TokenizersBackend"
- }
- }
- },
- "s2t-librispeech-asr": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.speech_to_text.tokenization_speech_to_text.Speech2TextTokenizer"
- }
- }
- },
- "speecht5-asr": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.speecht5.tokenization_speecht5.SpeechT5Tokenizer"
- }
- }
- },
- "splinter": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.splinter.tokenization_splinter.SplinterTokenizer"
- }
- }
- },
- "squeezebert-uncased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "stablelm-4e1t": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "starcoder2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer"
- }
- }
- },
- "switch-8": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "t5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "t5gemma-prefixlm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gemma.tokenization_gemma.GemmaTokenizer"
- }
- }
- },
- "tapas-finetuned-sqa": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.tapas.tokenization_tapas.TapasTokenizer"
- }
- }
- },
- "tvp": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "udop": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.udop.tokenization_udop.UdopTokenizer"
- }
- }
- },
- "umt5": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.t5.tokenization_t5.T5Tokenizer"
- }
- }
- },
- "video-llava-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "vilt-b32-mlm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "vip-llava-hf": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "visualbert-vqa-coco-pre": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.bert.tokenization_bert.BertTokenizer"
- }
- }
- },
- "mms-tts-eng": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.vits.tokenization_vits.VitsTokenizer"
- }
- }
- },
- "voxtral-2507": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "wav2vec2-960h": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "wav2vec2-bert-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "wav2vec2-conformer-rel-pos": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer"
- }
- }
- },
- "whisper": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.whisper.tokenization_whisper.WhisperTokenizer"
- }
- }
- },
- "xclip-patch32": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.clip.tokenization_clip.CLIPTokenizer"
- }
- }
- },
- "xglm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xglm.tokenization_xglm.XGLMTokenizer"
- }
- }
- },
- "xlm-mlm-en-2048": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm.tokenization_xlm.XLMTokenizer"
- }
- }
- },
- "xlm-roberta": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "xlm-roberta-xl": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "xlnet-cased": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer"
- }
- }
- },
- "xlstm": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.gpt_neox.tokenization_gpt_neox.GPTNeoXTokenizer"
- }
- }
- },
- "xmod": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.xlm_roberta.tokenization_xlm_roberta.XLMRobertaTokenizer"
- }
- }
- },
- "yoso-4096": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.albert.tokenization_albert.AlbertTokenizer"
- }
- }
- },
- "zamba-v1": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- },
- "zamba2": {
- "pkg": {
- "0": {
- "transformers": "transformers.models.llama.tokenization_llama.LlamaTokenizer"
- }
- }
- }
- },
- "info.vit.aimv2-patch14-224-lit": {
- "*": {
- "repo": "apple/aimv2-large-patch14-224-lit",
- "pkg": {
- "0": {
- "transformers": "Aimv2Model"
- }
- },
- "tasks": [
- "Aimv2VisionModel",
- "Aimv2Model",
- "Aimv2PreTrainedModel",
- "Aimv2TextModel"
- ]
- }
- },
- "info.vit.aimv2-patch14-224": {
- "*": {
- "repo": "apple/aimv2-large-patch14-224",
- "pkg": {
- "0": {
- "transformers": "Aimv2VisionModel"
- }
- },
- "tasks": [
- "Aimv2VisionModel",
- "Aimv2Model",
- "Aimv2PreTrainedModel",
- "Aimv2TextModel"
- ]
- }
- },
- "info.art.albert-xx-v2": {
- "*": {
- "repo": "albert/albert-xxlarge-v2",
- "pkg": {
- "0": {
- "transformers": "AlbertModel"
- }
- },
- "tasks": [
- "AlbertPreTrainedModel",
- "AlbertModel",
- "AlbertForPreTraining",
- "AlbertForMaskedLM",
- "AlbertForSequenceClassification",
- "AlbertForTokenClassification",
- "AlbertForQuestionAnswering",
- "AlbertForMultipleChoice"
- ]
- }
- },
- "info.vit.align": {
- "*": {
- "repo": "kakaobrain/align-base",
- "pkg": {
- "0": {
- "transformers": "AlignModel"
- }
- },
- "tasks": [
- "AlignPreTrainedModel",
- "AlignTextModel",
- "AlignVisionModel",
- "AlignModel"
- ]
- }
- },
- "info.vit.altclip": {
- "*": {
- "repo": "BAAI/AltCLIP",
- "pkg": {
- "0": {
- "transformers": "AltCLIPModel"
- }
- },
- "tasks": [
- "AltCLIPPreTrainedModel",
- "AltCLIPVisionModel",
- "AltCLIPTextModel",
- "AltCLIPModel"
- ]
- }
- },
- "info.stst.apertus": {
- "*": {
- "repo": "swiss-ai/Apertus-8B",
- "pkg": {
- "0": {
- "transformers": "ApertusModel"
- }
- },
- "tasks": [
- "ApertusModel",
- "ApertusForCausalLM",
- "ApertusForTokenClassification",
- "ApertusPreTrainedModel"
- ]
- }
- },
- "info.stst.afm": {
- "*": {
- "repo": "arcee-ai/AFM-4.5B",
- "pkg": {
- "0": {
- "transformers": "ArceeModel"
- }
- },
- "tasks": [
- "ArceeForCausalLM",
- "ArceeForQuestionAnswering",
- "ArceeForSequenceClassification",
- "ArceeForTokenClassification",
- "ArceeModel",
- "ArceePreTrainedModel"
- ]
- }
- },
- "info.vit.aria": {
- "*": {
- "repo": "rhymes-ai/Aria",
- "pkg": {
- "0": {
- "transformers": "AriaModel"
- }
- },
- "tasks": [
- "AriaForConditionalGeneration",
- "AriaPreTrainedModel",
- "AriaTextPreTrainedModel",
- "AriaTextModel",
- "AriaModel",
- "AriaTextForCausalLM"
- ]
- }
- },
- "info.vit.ast-finetuned-audioset-10-10-0593": {
- "*": {
- "repo": "MIT/ast-finetuned-audioset-10-10-0.4593",
- "pkg": {
- "0": {
- "transformers": "ASTModel"
- }
- },
- "tasks": [
- "ASTForAudioClassification",
- "ASTModel",
- "ASTPreTrainedModel"
- ]
- }
- },
- "info.stst.audio-flamingo-3-hf": {
- "*": {
- "repo": "nvidia/audio-flamingo-3-hf",
- "pkg": {
- "0": {
- "transformers": "AudioFlamingo3ForConditionalGeneration"
- }
- },
- "tasks": [
- "AudioFlamingo3ForConditionalGeneration",
- "AudioFlamingo3PreTrainedModel",
- "AudioFlamingo3Encoder"
- ]
- }
- },
- "info.aet.audio-flamingo-3-hf": {
- "*": {
- "repo": "nvidia/audio-flamingo-3-hf",
- "pkg": {
- "0": {
- "transformers": "AudioFlamingo3Encoder"
- }
- },
- "tasks": [
- "AudioFlamingo3ForConditionalGeneration",
- "AudioFlamingo3PreTrainedModel",
- "AudioFlamingo3Encoder"
- ]
- }
- },
- "info.stst.autoformer-tourism-monthly": {
- "*": {
- "repo": "huggingface/autoformer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "AutoformerModel"
- }
- },
- "tasks": [
- "AutoformerForPrediction",
- "AutoformerModel",
- "AutoformerPreTrainedModel"
- ]
- }
- },
- "info.vit.aya-vision": {
- "*": {
- "repo": "CohereForAI/aya-vision-8b",
- "pkg": {
- "0": {
- "transformers": "AyaVisionModel"
- }
- },
- "tasks": [
- "AyaVisionForConditionalGeneration",
- "AyaVisionPreTrainedModel",
- "AyaVisionModel"
- ]
- }
- },
- "info.ssm.bamba-t-hf": {
- "*": {
- "repo": "ibm-fms/Bamba-9.8b-2.2T-hf",
- "pkg": {
- "0": {
- "transformers": "BambaModel"
- }
- },
- "tasks": [
- "BambaModel",
- "BambaForCausalLM",
- "BambaPreTrainedModel"
- ]
- }
- },
- "info.art.bark": {
- "*": {
- "repo": "suno/bark",
- "pkg": {
- "0": {
- "transformers": "BarkModel"
- }
- },
- "tasks": [
- "BarkFineModel",
- "BarkSemanticModel",
- "BarkCoarseModel",
- "BarkModel",
- "BarkPreTrainedModel",
- "BarkCausalModel"
- ]
- }
- },
- "info.stst.bart": {
- "*": {
- "repo": "facebook/bart-large",
- "pkg": {
- "0": {
- "transformers": "BartModel"
- }
- },
- "tasks": [
- "BartForCausalLM",
- "BartForConditionalGeneration",
- "BartForQuestionAnswering",
- "BartForSequenceClassification",
- "BartModel",
- "BartPreTrainedModel",
- "BartPretrainedModel",
- "PretrainedBartModel"
- ]
- }
- },
- "info.vit.beit-patch16-224-pt": {
- "*": {
- "repo": "microsoft/beit-base-patch16-224-pt22k",
- "pkg": {
- "0": {
- "transformers": "BeitModel"
- }
- },
- "tasks": [
- "BeitForImageClassification",
- "BeitForMaskedImageModeling",
- "BeitForSemanticSegmentation",
- "BeitModel",
- "BeitPreTrainedModel",
- "BeitBackbone"
- ]
- }
- },
- "info.art.bert-uncased": {
- "*": {
- "repo": "google-bert/bert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BertModel"
- }
- },
- "file_256": [
- "c6c6348af2cb4d5852fe51102ce39605903dbe7925c005cf8995506cc21ea914"
- ],
- "layer_b3": [
- "30d7d2cc3ec9e4ba45844e005d0bbcb5887b6a0976042f73da916237dc5c4c12"
- ],
- "layer_256": [
- "94fd2508680ff684eff57e4a5a8ca46bf338fc356a9cf6fe8db2b84543dd7971"
- ],
- "tasks": [
- "BertForMaskedLM",
- "BertForMultipleChoice",
- "BertForNextSentencePrediction",
- "BertForPreTraining",
- "BertForQuestionAnswering",
- "BertForSequenceClassification",
- "BertForTokenClassification",
- "BertLayer",
- "BertLMHeadModel",
- "BertModel",
- "BertPreTrainedModel"
- ]
- }
- },
- "info.art.bert-for-seq-generation-l-24-bbc-encoder": {
- "*": {
- "repo": "google/bert_for_seq_generation_L-24_bbc_encoder",
- "pkg": {
- "0": {
- "transformers": "BertGenerationEncoder"
- }
- },
- "tasks": [
- "BertGenerationDecoder",
- "BertGenerationEncoder",
- "BertGenerationPreTrainedModel"
- ]
- }
- },
- "info.art.bigbird-roberta": {
- "*": {
- "repo": "google/bigbird-roberta-base",
- "pkg": {
- "0": {
- "transformers": "BigBirdModel"
- }
- },
- "tasks": [
- "BigBirdForCausalLM",
- "BigBirdForMaskedLM",
- "BigBirdForMultipleChoice",
- "BigBirdForPreTraining",
- "BigBirdForQuestionAnswering",
- "BigBirdForSequenceClassification",
- "BigBirdForTokenClassification",
- "BigBirdLayer",
- "BigBirdModel",
- "BigBirdPreTrainedModel"
- ]
- }
- },
- "info.stst.bigbird-pegasus-arxiv": {
- "*": {
- "repo": "google/bigbird-pegasus-large-arxiv",
- "pkg": {
- "0": {
- "transformers": "BigBirdPegasusModel"
- }
- },
- "tasks": [
- "BigBirdPegasusForCausalLM",
- "BigBirdPegasusForConditionalGeneration",
- "BigBirdPegasusForQuestionAnswering",
- "BigBirdPegasusForSequenceClassification",
- "BigBirdPegasusModel",
- "BigBirdPegasusPreTrainedModel"
- ]
- }
- },
- "info.art.biogpt": {
- "*": {
- "repo": "microsoft/biogpt",
- "pkg": {
- "0": {
- "transformers": "BioGptModel"
- }
- },
- "tasks": [
- "BioGptForCausalLM",
- "BioGptForTokenClassification",
- "BioGptForSequenceClassification",
- "BioGptModel",
- "BioGptPreTrainedModel"
- ]
- }
- },
- "info.vit.bit-50": {
- "*": {
- "repo": "google/bit-50",
- "pkg": {
- "0": {
- "transformers": "BitModel"
- }
- },
- "tasks": [
- "BitForImageClassification",
- "BitModel",
- "BitPreTrainedModel",
- "BitBackbone"
- ]
- }
- },
- "info.stst.bitnet-b18-4t": {
- "*": {
- "repo": "microsoft/bitnet-b1.58-2B-4T",
- "pkg": {
- "0": {
- "transformers": "BitNetModel"
- }
- },
- "tasks": [
- "BitNetForCausalLM",
- "BitNetModel",
- "BitNetPreTrainedModel"
- ]
- }
- },
- "info.stst.blenderbot": {
- "*": {
- "repo": "facebook/blenderbot-3B",
- "pkg": {
- "0": {
- "transformers": "BlenderbotModel"
- }
- },
- "tasks": [
- "BlenderbotForCausalLM",
- "BlenderbotForConditionalGeneration",
- "BlenderbotModel",
- "BlenderbotPreTrainedModel"
- ]
- }
- },
- "info.vit.blip-vqa": {
- "*": {
- "repo": "Salesforce/blip-vqa-base",
- "pkg": {
- "0": {
- "transformers": "BlipModel"
- }
- },
- "tasks": [
- "BlipModel",
- "BlipPreTrainedModel",
- "BlipForConditionalGeneration",
- "BlipForQuestionAnswering",
- "BlipVisionModel",
- "BlipTextModel",
- "BlipForImageTextRetrieval"
- ]
- }
- },
- "info.vit.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2Model"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.stst.blip2-opt": {
- "*": {
- "repo": "Salesforce/blip2-opt-2.7b",
- "pkg": {
- "0": {
- "transformers": "Blip2QFormerModel"
- }
- },
- "tasks": [
- "Blip2Model",
- "Blip2VisionModelWithProjection",
- "Blip2QFormerModel",
- "Blip2PreTrainedModel",
- "Blip2ForConditionalGeneration",
- "Blip2ForImageTextRetrieval",
- "Blip2VisionModel",
- "Blip2TextModelWithProjection"
- ]
- }
- },
- "info.art.bloom": {
- "*": {
- "repo": "bigscience/bloom",
- "pkg": {
- "0": {
- "transformers": "BloomModel"
- }
- },
- "tasks": [
- "BloomForCausalLM",
- "BloomModel",
- "BloomPreTrainedModel",
- "BloomForSequenceClassification",
- "BloomForTokenClassification",
- "BloomForQuestionAnswering"
- ]
- }
- },
- "info.vit.blt": {
- "*": {
- "repo": "facebook/blt",
- "pkg": {
- "0": {
- "transformers": "BltModel"
- }
- },
- "tasks": [
- "BltPreTrainedModel",
- "BltModel",
- "BltPatcher",
- "BltForCausalLM"
- ]
- }
- },
- "info.vit.bridgetower": {
- "*": {
- "repo": "BridgeTower/bridgetower-base",
- "pkg": {
- "0": {
- "transformers": "BridgeTowerModel"
- }
- },
- "tasks": [
- "BridgeTowerForContrastiveLearning",
- "BridgeTowerForImageAndTextRetrieval",
- "BridgeTowerForMaskedLM",
- "BridgeTowerModel",
- "BridgeTowerPreTrainedModel"
- ]
- }
- },
- "info.art.bros-uncased": {
- "*": {
- "repo": "jinho8345/bros-base-uncased",
- "pkg": {
- "0": {
- "transformers": "BrosModel"
- }
- },
- "tasks": [
- "BrosPreTrainedModel",
- "BrosModel",
- "BrosForTokenClassification",
- "BrosSpadeEEForTokenClassification",
- "BrosSpadeELForTokenClassification"
- ]
- }
- },
- "info.art.camembert": {
- "*": {
- "repo": "almanach/camembert-base",
- "pkg": {
- "0": {
- "transformers": "CamembertModel"
- }
- },
- "tasks": [
- "CamembertForCausalLM",
- "CamembertForMaskedLM",
- "CamembertForMultipleChoice",
- "CamembertForQuestionAnswering",
- "CamembertForSequenceClassification",
- "CamembertForTokenClassification",
- "CamembertModel",
- "CamembertPreTrainedModel"
- ]
- }
- },
- "info.art.canine-s": {
- "*": {
- "repo": "google/canine-s",
- "pkg": {
- "0": {
- "transformers": "CanineModel"
- }
- },
- "tasks": [
- "CanineForMultipleChoice",
- "CanineForQuestionAnswering",
- "CanineForSequenceClassification",
- "CanineForTokenClassification",
- "CanineLayer",
- "CanineModel",
- "CaninePreTrainedModel"
- ]
- }
- },
- "info.stst.chameleon": {
- "*": {
- "repo": "meta/chameleon-7B",
- "pkg": {
- "0": {
- "transformers": "ChameleonModel"
- }
- },
- "tasks": [
- "ChameleonForConditionalGeneration",
- "ChameleonModel",
- "ChameleonPreTrainedModel",
- "ChameleonVQVAE"
- ]
- }
- },
- "info.vit.chinese-clip-vit-patch16": {
- "*": {
- "repo": "OFA-Sys/chinese-clip-vit-base-patch16",
- "pkg": {
- "0": {
- "transformers": "ChineseCLIPModel"
- }
- },
- "tasks": [
- "ChineseCLIPModel",
- "ChineseCLIPPreTrainedModel",
- "ChineseCLIPTextModel",
- "ChineseCLIPVisionModel"
- ]
- }
- },
- "info.vit.clap-htsat-fused": {
- "*": {
- "repo": "laion/clap-htsat-fused",
- "pkg": {
- "0": {
- "transformers": "ClapModel"
- }
- },
- "file_256": [
- "c92b5a2bee69ff5dd05820d9e0a5cddbc9c9b9dd19a6cb3214f0cf4f29a4d1b0",
- "ae69f555e7f1a2333b8e684c9fa8233f44a47bbadf76d484f941b74f74d2753d"
- ],
- "layer_b3": [
- "a4d26450ac399d51b9abbe37859615bb02a5cbf63521da4c7cdc549d04a2872c",
- "ddf310d8eb2d4e3f61e605978675a9d3a748cad9406b9aee8335eae013e77573"
- ],
- "layer_256": [
- "843ba86000971d6067bfc4f3ed6dd01bd6f6726188aaa15d86b05554f4fe8481",
- "27529e30442d030a28badf9d62710f4b74e38e9c4424ed169c7e0ac072f5a771"
- ],
- "tasks": [
- "ClapModel",
- "ClapPreTrainedModel",
- "ClapTextModel",
- "ClapTextModelWithProjection",
- "ClapAudioModel",
- "ClapAudioModelWithProjection"
- ]
- }
- },
- "info.vit.clip-vit-patch32": {
- "*": {
- "repo": "openai/clip-vit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "CLIPModel"
- }
- },
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clipseg-rd64": {
- "*": {
- "repo": "CIDAS/clipseg-rd64",
- "pkg": {
- "0": {
- "transformers": "CLIPSegModel"
- }
- },
- "tasks": [
- "CLIPSegModel",
- "CLIPSegPreTrainedModel",
- "CLIPSegTextModel",
- "CLIPSegVisionModel",
- "CLIPSegForImageSegmentation"
- ]
- }
- },
- "info.vit.clvp-dev": {
- "*": {
- "repo": "susnato/clvp_dev",
- "pkg": {
- "0": {
- "transformers": "ClvpModelForConditionalGeneration"
- }
- },
- "tasks": [
- "ClvpModelForConditionalGeneration",
- "ClvpForCausalLM",
- "ClvpModel",
- "ClvpPreTrainedModel",
- "ClvpEncoder",
- "ClvpDecoder"
- ]
- }
- },
- "info.stst.llama-2-hf": {
- "*": {
- "repo": "meta-llama/Llama-2-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlamaModel"
- }
- },
- "tasks": [
- "LlamaForCausalLM",
- "LlamaModel",
- "LlamaPreTrainedModel",
- "LlamaForSequenceClassification",
- "LlamaForQuestionAnswering",
- "LlamaForTokenClassification"
- ]
- }
- },
- "info.art.codegen-mono": {
- "*": {
- "repo": "Salesforce/codegen-2B-mono",
- "pkg": {
- "0": {
- "transformers": "CodeGenModel"
- }
- },
- "tasks": [
- "CodeGenForCausalLM",
- "CodeGenModel",
- "CodeGenPreTrainedModel"
- ]
- }
- },
- "info.stst.c4ai-command-r-v01": {
- "*": {
- "repo": "CohereForAI/c4ai-command-r-v01",
- "pkg": {
- "0": {
- "transformers": "CohereModel"
- }
- },
- "tasks": [
- "CohereForCausalLM",
- "CohereModel",
- "CoherePreTrainedModel"
- ]
- }
- },
- "info.vit.command-a-vision-07-2025": {
- "*": {
- "repo": "CohereLabs/command-a-vision-07-2025",
- "pkg": {
- "0": {
- "transformers": "Cohere2VisionModel"
- }
- },
- "tasks": [
- "Cohere2VisionForConditionalGeneration",
- "Cohere2VisionPreTrainedModel",
- "Cohere2VisionModel"
- ]
- }
- },
- "info.detr.conditional-detr-resnet-50": {
- "*": {
- "repo": "microsoft/conditional-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "ConditionalDetrModel"
- }
- },
- "tasks": [
- "ConditionalDetrForObjectDetection",
- "ConditionalDetrForSegmentation",
- "ConditionalDetrModel",
- "ConditionalDetrPreTrainedModel"
- ]
- }
- },
- "info.art.conv-bert": {
- "*": {
- "repo": "YituTech/conv-bert-base",
- "pkg": {
- "0": {
- "transformers": "ConvBertModel"
- }
- },
- "tasks": [
- "ConvBertForMaskedLM",
- "ConvBertForMultipleChoice",
- "ConvBertForQuestionAnswering",
- "ConvBertForSequenceClassification",
- "ConvBertForTokenClassification",
- "ConvBertLayer",
- "ConvBertModel",
- "ConvBertPreTrainedModel"
- ]
- }
- },
- "info.vit.convnext-224": {
- "*": {
- "repo": "facebook/convnext-tiny-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextModel"
- }
- },
- "tasks": [
- "ConvNextForImageClassification",
- "ConvNextModel",
- "ConvNextPreTrainedModel",
- "ConvNextBackbone"
- ]
- }
- },
- "info.vit.convnextv2-224": {
- "*": {
- "repo": "facebook/convnextv2-tiny-1k-224",
- "pkg": {
- "0": {
- "transformers": "ConvNextV2Model"
- }
- },
- "tasks": [
- "ConvNextV2ForImageClassification",
- "ConvNextV2Model",
- "ConvNextV2PreTrainedModel",
- "ConvNextV2Backbone"
- ]
- }
- },
- "info.stst.cpm-ant": {
- "*": {
- "repo": "openbmb/cpm-ant-10b",
- "pkg": {
- "0": {
- "transformers": "CpmAntModel"
- }
- },
- "tasks": [
- "CpmAntForCausalLM",
- "CpmAntModel",
- "CpmAntPreTrainedModel"
- ]
- }
- },
- "info.stst.csm": {
- "*": {
- "repo": "sesame/csm-1b",
- "pkg": {
- "0": {
- "transformers": "CsmForConditionalGeneration"
- }
- },
- "tasks": [
- "CsmPreTrainedModel",
- "CsmBackboneModel",
- "CsmDepthDecoderModel",
- "CsmDepthDecoderForCausalLM",
- "CsmForConditionalGeneration"
- ]
- }
- },
- "info.art.ctrl": {
- "*": {
- "repo": "Salesforce/ctrl",
- "pkg": {
- "0": {
- "transformers": "CTRLModel"
- }
- },
- "tasks": [
- "CTRLForSequenceClassification",
- "CTRLLMHeadModel",
- "CTRLModel",
- "CTRLPreTrainedModel"
- ]
- }
- },
- "info.vit.cvt-13": {
- "*": {
- "repo": "microsoft/cvt-13",
- "pkg": {
- "0": {
- "transformers": "CvtModel"
- }
- },
- "tasks": [
- "CvtForImageClassification",
- "CvtModel",
- "CvtPreTrainedModel"
- ]
- }
- },
- "info.art.cwm": {
- "*": {
- "repo": "facebook/cwm",
- "pkg": {
- "0": {
- "transformers": "CwmModel"
- }
- },
- "tasks": [
- "CwmPreTrainedModel",
- "CwmModel",
- "CwmForCausalLM"
- ]
- }
- },
- "info.detr.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine-xlarge-coco",
- "pkg": {
- "0": {
- "transformers": "DFineModel"
- }
- },
- "tasks": [
- "DFineModel",
- "DFinePreTrainedModel",
- "DFineForObjectDetection"
- ]
- }
- },
- "info.detr.dab-detr": {
- "*": {
- "repo": "IDEA-Research/dab-detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DabDetrModel"
- }
- },
- "tasks": [
- "DabDetrForObjectDetection",
- "DabDetrModel",
- "DabDetrPreTrainedModel"
- ]
- }
- },
- "info.gan.dac": {
- "*": {
- "repo": "descript/dac_16khz",
- "pkg": {
- "0": {
- "transformers": "DacModel"
- }
- },
- "tasks": [
- "DacModel",
- "DacPreTrainedModel"
- ]
- }
- },
- "info.aet.data2vec-audio-960h": {
- "*": {
- "repo": "facebook/data2vec-audio-base-960h",
- "pkg": {
- "0": {
- "transformers": "Data2VecAudioModel"
- }
- },
- "tasks": [
- "Data2VecAudioForAudioFrameClassification",
- "Data2VecAudioForCTC",
- "Data2VecAudioForSequenceClassification",
- "Data2VecAudioForXVector",
- "Data2VecAudioModel",
- "Data2VecAudioPreTrainedModel"
- ]
- }
- },
- "info.art.data2vec-text": {
- "*": {
- "repo": "facebook/data2vec-text-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecTextModel"
- }
- },
- "tasks": [
- "Data2VecTextForCausalLM",
- "Data2VecTextForMaskedLM",
- "Data2VecTextForMultipleChoice",
- "Data2VecTextForQuestionAnswering",
- "Data2VecTextForSequenceClassification",
- "Data2VecTextForTokenClassification",
- "Data2VecTextModel",
- "Data2VecTextPreTrainedModel"
- ]
- }
- },
- "info.vit.data2vec-vision": {
- "*": {
- "repo": "facebook/data2vec-vision-base",
- "pkg": {
- "0": {
- "transformers": "Data2VecVisionModel"
- }
- },
- "tasks": [
- "Data2VecVisionForImageClassification",
- "Data2VecVisionForSemanticSegmentation",
- "Data2VecVisionModel",
- "Data2VecVisionPreTrainedModel"
- ]
- }
- },
- "info.stst.dbrx": {
- "*": {
- "repo": "databricks/dbrx-instruct",
- "pkg": {
- "0": {
- "transformers": "DbrxModel"
- }
- },
- "tasks": [
- "DbrxForCausalLM",
- "DbrxModel",
- "DbrxPreTrainedModel"
- ]
- }
- },
- "info.art.deberta": {
- "*": {
- "repo": "microsoft/deberta-base",
- "pkg": {
- "0": {
- "transformers": "DebertaModel"
- }
- },
- "tasks": [
- "DebertaForMaskedLM",
- "DebertaForQuestionAnswering",
- "DebertaForSequenceClassification",
- "DebertaForTokenClassification",
- "DebertaModel",
- "DebertaPreTrainedModel"
- ]
- }
- },
- "info.art.deberta-v2-x": {
- "*": {
- "repo": "microsoft/deberta-v2-xlarge",
- "pkg": {
- "0": {
- "transformers": "DebertaV2Model"
- }
- },
- "tasks": [
- "DebertaV2ForMaskedLM",
- "DebertaV2ForMultipleChoice",
- "DebertaV2ForQuestionAnswering",
- "DebertaV2ForSequenceClassification",
- "DebertaV2ForTokenClassification",
- "DebertaV2Model",
- "DebertaV2PreTrainedModel"
- ]
- }
- },
- "info.art.decision-transformer-gym-hopper": {
- "*": {
- "repo": "edbeeching/decision-transformer-gym-hopper-medium",
- "pkg": {
- "0": {
- "transformers": "DecisionTransformerModel"
- }
- },
- "tasks": [
- "DecisionTransformerGPT2Model",
- "DecisionTransformerGPT2PreTrainedModel",
- "DecisionTransformerModel",
- "DecisionTransformerPreTrainedModel"
- ]
- }
- },
- "info.moe.deepseek-v2-lite": {
- "*": {
- "repo": "deepseek-ai/DeepSeek-V2-Lite",
- "pkg": {
- "0": {
- "transformers": "DeepseekV2Model"
- }
- },
- "tasks": [
- "DeepseekV2PreTrainedModel",
- "DeepseekV2Model",
- "DeepseekV2ForCausalLM",
- "DeepseekV2ForSequenceClassification"
- ]
- }
- },
- "info.moe.deepseek-v3": {
- "*": {
- "repo": "bzantium/tiny-deepseek-v3",
- "pkg": {
- "0": {
- "transformers": "DeepseekV3Model"
- }
- },
- "tasks": [
- "DeepseekV3PreTrainedModel",
- "DeepseekV3Model",
- "DeepseekV3ForCausalLM",
- "DeepseekV3ForSequenceClassification",
- "DeepseekV3ForTokenClassification"
- ]
- }
- },
- "info.vit.deepseek-vl-chat": {
- "*": {
- "repo": "deepseek-community/deepseek-vl-1.3b-chat",
- "pkg": {
- "0": {
- "transformers": "DeepseekVLModel"
- }
- },
- "tasks": [
- "DeepseekVLPreTrainedModel",
- "DeepseekVLModel",
- "DeepseekVLForConditionalGeneration"
- ]
- }
- },
- "info.detr.deformable-detr": {
- "*": {
- "repo": "SenseTime/deformable-detr",
- "pkg": {
- "0": {
- "transformers": "DeformableDetrModel"
- }
- },
- "tasks": [
- "DeformableDetrForObjectDetection",
- "DeformableDetrModel",
- "DeformableDetrPreTrainedModel"
- ]
- }
- },
- "info.vit.deit-distilled-patch16-224": {
- "*": {
- "repo": "facebook/deit-base-distilled-patch16-224",
- "pkg": {
- "0": {
- "transformers": "DeiTModel"
- }
- },
- "tasks": [
- "DeiTForImageClassification",
- "DeiTForImageClassificationWithTeacher",
- "DeiTForMaskedImageModeling",
- "DeiTModel",
- "DeiTPreTrainedModel"
- ]
- }
- },
- "info.vit.depth": {
- "*": {
- "repo": "apple/DepthPro",
- "pkg": {
- "0": {
- "transformers": "DepthProModel"
- }
- },
- "tasks": [
- "DepthProPreTrainedModel",
- "DepthProModel",
- "DepthProForDepthEstimation"
- ]
- }
- },
- "info.detr.detr-resnet-50": {
- "*": {
- "repo": "facebook/detr-resnet-50",
- "pkg": {
- "0": {
- "transformers": "DetrModel"
- }
- },
- "tasks": [
- "DetrForObjectDetection",
- "DetrForSegmentation",
- "DetrModel",
- "DetrPreTrainedModel"
- ]
- }
- },
- "info.stst.dia": {
- "*": {
- "repo": "nari-labs/Dia-1.6B",
- "pkg": {
- "0": {
- "transformers": "DiaModel"
- }
- },
- "tasks": [
- "DiaModel",
- "DiaPreTrainedModel",
- "DiaForConditionalGeneration"
- ]
- }
- },
- "info.stst.diffllama-handcut": {
- "*": {
- "repo": "kajuma/DiffLlama-0.3B-handcut",
- "pkg": {
- "0": {
- "transformers": "DiffLlamaModel"
- }
- },
- "tasks": [
- "DiffLlamaPreTrainedModel",
- "DiffLlamaModel",
- "DiffLlamaForCausalLM",
- "DiffLlamaForSequenceClassification",
- "DiffLlamaForQuestionAnswering",
- "DiffLlamaForTokenClassification"
- ]
- }
- },
- "info.gan.dinat-in-224": {
- "*": {
- "repo": "shi-labs/dinat-mini-in1k-224",
- "pkg": {
- "0": {
- "transformers": "DinatModel"
- }
- },
- "tasks": [
- "DinatForImageClassification",
- "DinatModel",
- "DinatPreTrainedModel",
- "DinatBackbone"
- ]
- }
- },
- "info.vit.dinov2-patch16-224": {
- "*": {
- "repo": "google/dinov2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Dinov2Model"
- }
- },
- "tasks": [
- "Dinov2ForImageClassification",
- "Dinov2Model",
- "Dinov2PreTrainedModel",
- "Dinov2Backbone"
- ]
- }
- },
- "info.vit.dinov2-with-registers": {
- "*": {
- "repo": "facebook/dinov2-with-registers-base",
- "pkg": {
- "0": {
- "transformers": "Dinov2WithRegistersModel"
- }
- },
- "tasks": [
- "Dinov2WithRegistersPreTrainedModel",
- "Dinov2WithRegistersModel",
- "Dinov2WithRegistersForImageClassification",
- "Dinov2WithRegistersBackbone"
- ]
- }
- },
- "info.vit.dinov3-convnext-pretrain-lvd": {
- "*": {
- "repo": "facebook/dinov3-convnext-tiny-pretrain-lvd1689m",
- "pkg": {
- "0": {
- "transformers": "DINOv3ConvNextModel"
- }
- },
- "tasks": [
- "DINOv3ConvNextModel",
- "DINOv3ConvNextPreTrainedModel",
- "DINOv3ConvNextBackbone"
- ]
- }
- },
- "info.vit.dinov3-vits16-pretrain-lvd": {
- "*": {
- "repo": "facebook/dinov3-vits16-pretrain-lvd1689m",
- "pkg": {
- "0": {
- "transformers": "DINOv3ViTModel"
- }
- },
- "tasks": [
- "DINOv3ViTModel",
- "DINOv3ViTPreTrainedModel",
- "DINOv3ViTBackbone"
- ]
- }
- },
- "info.art.distilbert-uncased": {
- "*": {
- "repo": "distilbert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "DistilBertModel"
- }
- },
- "tasks": [
- "DistilBertForMaskedLM",
- "DistilBertForMultipleChoice",
- "DistilBertForQuestionAnswering",
- "DistilBertForSequenceClassification",
- "DistilBertForTokenClassification",
- "DistilBertModel",
- "DistilBertPreTrainedModel"
- ]
- }
- },
- "info.moe.doge": {
- "*": {
- "repo": "SmallDoge/Doge-320M",
- "pkg": {
- "0": {
- "transformers": "DogeModel"
- }
- },
- "tasks": [
- "DogeForCausalLM",
- "DogeModel",
- "DogePreTrainedModel",
- "DogeForSequenceClassification"
- ]
- }
- },
- "info.vit.donut": {
- "*": {
- "repo": "naver-clova-ix/donut-base",
- "pkg": {
- "0": {
- "transformers": "DonutSwinModel"
- }
- },
- "tasks": [
- "DonutSwinModel",
- "DonutSwinPreTrainedModel",
- "DonutSwinForImageClassification"
- ]
- }
- },
- "info.moe.dots-llm1": {
- "*": {
- "repo": "rednote-hilab/dots.llm1.base",
- "pkg": {
- "0": {
- "transformers": "Dots1Model"
- }
- },
- "tasks": [
- "Dots1PreTrainedModel",
- "Dots1Model",
- "Dots1ForCausalLM"
- ]
- }
- },
- "info.vit.dpr-ctx-encoder-single-nq": {
- "*": {
- "repo": "facebook/dpr-ctx_encoder-single-nq-base",
- "pkg": {
- "0": {
- "transformers": "DPRQuestionEncoder"
- }
- },
- "tasks": [
- "DPRContextEncoder",
- "DPRPretrainedContextEncoder",
- "DPRPreTrainedModel",
- "DPRPretrainedQuestionEncoder",
- "DPRPretrainedReader",
- "DPRQuestionEncoder",
- "DPRReader"
- ]
- }
- },
- "info.detr.dpt": {
- "*": {
- "repo": "Intel/dpt-large",
- "pkg": {
- "0": {
- "transformers": "DPTModel"
- }
- },
- "tasks": [
- "DPTForDepthEstimation",
- "DPTForSemanticSegmentation",
- "DPTModel",
- "DPTPreTrainedModel"
- ]
- }
- },
- "info.vit.edgetam1-hiera": {
- "*": {
- "repo": "facebook/edgetam.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "EdgeTamModel"
- }
- },
- "tasks": [
- "EdgeTamModel",
- "EdgeTamVisionModel",
- "EdgeTamPreTrainedModel"
- ]
- }
- },
- "info.vit.edgetam": {
- "*": {
- "repo": "facebook/EdgeTAM",
- "pkg": {
- "0": {
- "transformers": "EdgeTamVideoModel"
- }
- },
- "tasks": [
- "EdgeTamVideoModel",
- "EdgeTamVideoInferenceSession",
- "EdgeTamVideoPreTrainedModel"
- ]
- }
- },
- "info.vit.efficientloftr": {
- "*": {
- "repo": "zju-community/efficientloftr",
- "pkg": {
- "0": {
- "transformers": "EfficientLoFTRModel"
- }
- },
- "tasks": [
- "EfficientLoFTRPreTrainedModel",
- "EfficientLoFTRModel",
- "EfficientLoFTRForKeypointMatching"
- ]
- }
- },
- "info.vit.efficientnet-b7": {
- "*": {
- "repo": "google/efficientnet-b7",
- "pkg": {
- "0": {
- "transformers": "EfficientNetModel"
- }
- },
- "tasks": [
- "EfficientNetForImageClassification",
- "EfficientNetModel",
- "EfficientNetPreTrainedModel"
- ]
- }
- },
- "info.art.electra-discriminator": {
- "*": {
- "repo": "google/electra-small-discriminator",
- "pkg": {
- "0": {
- "transformers": "ElectraModel"
- }
- },
- "tasks": [
- "ElectraForCausalLM",
- "ElectraForMaskedLM",
- "ElectraForMultipleChoice",
- "ElectraForPreTraining",
- "ElectraForQuestionAnswering",
- "ElectraForSequenceClassification",
- "ElectraForTokenClassification",
- "ElectraModel",
- "ElectraPreTrainedModel"
- ]
- }
- },
- "info.art.emu3-chat-hf": {
- "*": {
- "repo": "Emu3-community/Emu3-Chat-hf",
- "pkg": {
- "0": {
- "transformers": "Emu3Model"
- }
- },
- "tasks": [
- "Emu3ForConditionalGeneration",
- "Emu3ForCausalLM",
- "Emu3TextModel",
- "Emu3PreTrainedModel",
- "Emu3VQVAE",
- "Emu3Model"
- ]
- }
- },
- "info.gan.encodec": {
- "*": {
- "repo": "facebook/encodec_24khz",
- "pkg": {
- "0": {
- "transformers": "EncodecModel"
- }
- },
- "tasks": [
- "EncodecModel",
- "EncodecPreTrainedModel"
- ]
- }
- },
- "info.art.ernie-3-zh": {
- "*": {
- "repo": "nghuyong/ernie-3.0-base-zh",
- "pkg": {
- "0": {
- "transformers": "ErnieModel"
- }
- },
- "tasks": [
- "ErnieForCausalLM",
- "ErnieForMaskedLM",
- "ErnieForMultipleChoice",
- "ErnieForNextSentencePrediction",
- "ErnieForPreTraining",
- "ErnieForQuestionAnswering",
- "ErnieForSequenceClassification",
- "ErnieForTokenClassification",
- "ErnieModel",
- "ErniePreTrainedModel"
- ]
- }
- },
- "info.stst.ernie-45-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-0.3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5Model"
- }
- },
- "tasks": [
- "Ernie4_5ForCausalLM",
- "Ernie4_5Model",
- "Ernie4_5PreTrainedModel"
- ]
- }
- },
- "info.moe.ernie-4-a-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-21B-A3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5_MoeModel"
- }
- },
- "tasks": [
- "Ernie4_5_MoeForCausalLM",
- "Ernie4_5_MoeModel",
- "Ernie4_5_MoePreTrainedModel"
- ]
- }
- },
- "info.vit.ernie-4-vl-a-pt": {
- "*": {
- "repo": "baidu/ERNIE-4.5-VL-28B-A3B-PT",
- "pkg": {
- "0": {
- "transformers": "Ernie4_5_VL_MoeModel"
- }
- },
- "tasks": [
- "Ernie4_5_VL_MoePreTrainedModel",
- "Ernie4_5_VL_MoeForConditionalGeneration",
- "Ernie4_5_VL_MoeModel",
- "Ernie4_5_VL_MoeTextModel",
- "Ernie4_5_VL_MoeVisionTransformerPretrainedModel",
- "Ernie4_5_VL_MoeVariableResolutionResamplerModel"
- ]
- }
- },
- "info.aet.esm": {
- "*": {
- "repo": "facebook/esm-1b",
- "pkg": {
- "0": {
- "transformers": "EsmModel"
- }
- },
- "tasks": [
- "EsmForMaskedLM",
- "EsmForSequenceClassification",
- "EsmForTokenClassification",
- "EsmModel",
- "EsmPreTrainedModel"
- ]
- }
- },
- "info.stst.evolla-hf": {
- "*": {
- "repo": "westlake-repl/Evolla-10B-hf",
- "pkg": {
- "0": {
- "transformers": "EvollaModel"
- }
- },
- "tasks": [
- "EvollaForProteinText2Text",
- "EvollaModel",
- "EvollaPreTrainedModel"
- ]
- }
- },
- "info.stst.exaone-4": {
- "*": {
- "repo": "LGAI-EXAONE/EXAONE-4.0-32B",
- "pkg": {
- "0": {
- "transformers": "Exaone4Model"
- }
- },
- "tasks": [
- "Exaone4PreTrainedModel",
- "Exaone4Model",
- "Exaone4ForCausalLM",
- "Exaone4ForSequenceClassification",
- "Exaone4ForTokenClassification",
- "Exaone4ForQuestionAnswering"
- ]
- }
- },
- "info.ssm.falcon": {
- "*": {
- "repo": "tiiuae/falcon-7b",
- "pkg": {
- "0": {
- "transformers": "FalconModel"
- }
- },
- "tasks": [
- "FalconForCausalLM",
- "FalconModel",
- "FalconPreTrainedModel",
- "FalconForSequenceClassification",
- "FalconForTokenClassification",
- "FalconForQuestionAnswering"
- ]
- }
- },
- "info.ssm.falconh1-t-hf": {
- "*": {
- "repo": "tiiuae/Falcon-H1-34B-Instruct",
- "pkg": {
- "0": {
- "transformers": "FalconH1Model"
- }
- },
- "tasks": [
- "FalconH1Model",
- "FalconH1ForCausalLM",
- "FalconH1PreTrainedModel"
- ]
- }
- },
- "info.ssm.falcon-mamba": {
- "*": {
- "repo": "tiiuae/falcon-mamba-7b",
- "pkg": {
- "0": {
- "transformers": "FalconMambaModel"
- }
- },
- "tasks": [
- "FalconMambaForCausalLM",
- "FalconMambaModel",
- "FalconMambaPreTrainedModel",
- "FalconMambaCache"
- ]
- }
- },
- "info.vit.fastvlm": {
- "*": {
- "repo": "KamilaMila/FastVLM-7B",
- "pkg": {
- "0": {
- "transformers": "FastVlmModel"
- }
- },
- "tasks": [
- "FastVlmForConditionalGeneration",
- "FastVlmModel",
- "FastVlmPreTrainedModel"
- ]
- }
- },
- "info.aet.fastspeech2-conformer": {
- "*": {
- "repo": "espnet/fastspeech2_conformer",
- "pkg": {
- "0": {
- "transformers": "FastSpeech2ConformerModel"
- }
- },
- "tasks": [
- "FastSpeech2ConformerWithHifiGan",
- "FastSpeech2ConformerHifiGan",
- "FastSpeech2ConformerModel",
- "FastSpeech2ConformerPreTrainedModel"
- ]
- }
- },
- "info.stst.fastspeech2-conformer": {
- "*": {
- "repo": "espnet/fastspeech2_conformer",
- "pkg": {
- "0": {
- "transformers": "FastSpeech2ConformerWithHifiGan"
- }
- },
- "tasks": [
- "FastSpeech2ConformerWithHifiGan",
- "FastSpeech2ConformerHifiGan",
- "FastSpeech2ConformerModel",
- "FastSpeech2ConformerPreTrainedModel"
- ]
- }
- },
- "info.art.flaubert-uncased": {
- "*": {
- "repo": "flaubert/flaubert_base_uncased",
- "pkg": {
- "0": {
- "transformers": "FlaubertModel"
- }
- },
- "tasks": [
- "FlaubertForMultipleChoice",
- "FlaubertForQuestionAnswering",
- "FlaubertForQuestionAnsweringSimple",
- "FlaubertForSequenceClassification",
- "FlaubertForTokenClassification",
- "FlaubertModel",
- "FlaubertWithLMHeadModel",
- "FlaubertPreTrainedModel"
- ]
- }
- },
- "info.vit.flava": {
- "*": {
- "repo": "facebook/flava-full",
- "pkg": {
- "0": {
- "transformers": "FlavaModel"
- }
- },
- "tasks": [
- "FlavaForPreTraining",
- "FlavaImageCodebook",
- "FlavaImageModel",
- "FlavaModel",
- "FlavaMultimodalModel",
- "FlavaPreTrainedModel",
- "FlavaTextModel"
- ]
- }
- },
- "info.moe.flexolmo-7x-1t": {
- "*": {
- "repo": "allenai/FlexOlmo-7x7B-1T",
- "pkg": {
- "0": {
- "transformers": "FlexOlmoModel"
- }
- },
- "tasks": [
- "FlexOlmoForCausalLM",
- "FlexOlmoModel",
- "FlexOlmoPreTrainedModel"
- ]
- }
- },
- "info.vit.florence-2": {
- "*": {
- "repo": "florence-community/Florence-2-base",
- "pkg": {
- "0": {
- "transformers": "Florence2Model"
- }
- },
- "tasks": [
- "Florence2Model",
- "Florence2ForConditionalGeneration",
- "Florence2PreTrainedModel",
- "Florence2VisionBackbone",
- "Florence2VisionPreTrainedModel"
- ]
- }
- },
- "info.art.fnet": {
- "*": {
- "repo": "google/fnet-base",
- "pkg": {
- "0": {
- "transformers": "FNetModel"
- }
- },
- "tasks": [
- "FNetForMaskedLM",
- "FNetForMultipleChoice",
- "FNetForNextSentencePrediction",
- "FNetForPreTraining",
- "FNetForQuestionAnswering",
- "FNetForSequenceClassification",
- "FNetForTokenClassification",
- "FNetLayer",
- "FNetModel",
- "FNetPreTrainedModel"
- ]
- }
- },
- "info.vit.focalnet": {
- "*": {
- "repo": "microsoft/focalnet-tiny",
- "pkg": {
- "0": {
- "transformers": "FocalNetModel"
- }
- },
- "tasks": [
- "FocalNetForImageClassification",
- "FocalNetForMaskedImageModeling",
- "FocalNetBackbone",
- "FocalNetModel",
- "FocalNetPreTrainedModel"
- ]
- }
- },
- "info.stst.wmt19-en-ru": {
- "*": {
- "repo": "facebook/wmt19-en-ru",
- "pkg": {
- "0": {
- "transformers": "FSMTModel"
- }
- },
- "tasks": [
- "FSMTForConditionalGeneration",
- "FSMTModel",
- "PretrainedFSMTModel"
- ]
- }
- },
- "info.aet.funnel": {
- "*": {
- "repo": "funnel-transformer/small",
- "pkg": {
- "0": {
- "transformers": "FunnelModel"
- }
- },
- "tasks": [
- "FunnelBaseModel",
- "FunnelForMaskedLM",
- "FunnelForMultipleChoice",
- "FunnelForPreTraining",
- "FunnelForQuestionAnswering",
- "FunnelForSequenceClassification",
- "FunnelForTokenClassification",
- "FunnelModel",
- "FunnelPreTrainedModel"
- ]
- }
- },
- "info.vit.fuyu": {
- "*": {
- "repo": "adept/fuyu-8b",
- "pkg": {
- "0": {
- "transformers": "FuyuModel"
- }
- },
- "tasks": [
- "FuyuForCausalLM",
- "FuyuPreTrainedModel",
- "FuyuModel"
- ]
- }
- },
- "info.stst.gemma": {
- "*": {
- "repo": "google/gemma-7b",
- "pkg": {
- "0": {
- "transformers": "GemmaModel"
- }
- },
- "file_256": [
- "01676b4c6e765f737a5e9854a315de3887e939c370cae116d505777729099a68"
- ],
- "layer_b3": [
- "438d82c867240f194a4e15798eef2886a911c8f57fa2d9f4ffad1d56e7bd1ccf",
- "1de38e09f5f2c5345de48b8cd4dddcfff3e341cc0059752446e186b3863f0981"
- ],
- "layer_256": [
- "e4835a72d582b4ae066d6ff0519f2ee9f8b21fb02e8c28d8eaa317f8d1e9ea75",
- "1657c7180b48672004f4463308dfdd56d92eedeb23d1408ea766985ca208e5aa"
- ],
- "tasks": [
- "GemmaModel",
- "GemmaForCausalLM",
- "GemmaForSequenceClassification",
- "GemmaForTokenClassification",
- "GemmaPreTrainedModel"
- ]
- }
- },
- "info.stst.gemma2": {
- "*": {
- "repo": "google/gemma-2-9b",
- "pkg": {
- "0": {
- "transformers": "Gemma2Model"
- }
- },
- "file_256": [
- "e909230aabafad02d097c7dc02f2ae062b4e6b0593477c1f07679d277e09ce71",
- "d61628bc793240439e608c5ae744f55ec8770f684abb63602648a24cb6da60bc"
- ],
- "layer_b3": [
- "55a3c812ac0832d154867f5927365bcc776926e48e65f7f35a81fc11f4bb81da",
- "543572889beb25cad83a43ce70cdd255d2c82951d6595e8c97ff62fd05871c99"
- ],
- "layer_256": [
- "a0d820c39578cf888f398579d9a00d69b31c81e049795ba70008dad8fe5b3a33",
- "abc83b04a04467579ea1952a7efbdd252b8641ac0e2a6a9be2a5a73e371111d6"
- ],
- "tasks": [
- "Gemma2ForCausalLM",
- "Gemma2Model",
- "Gemma2PreTrainedModel",
- "Gemma2ForSequenceClassification",
- "Gemma2ForTokenClassification"
- ]
- }
- },
- "info.vit.gemma-3": {
- "*": {
- "repo": "google/gemma-3-4b-it",
- "pkg": {
- "0": {
- "transformers": "Gemma3Model"
- }
- },
- "tasks": [
- "Gemma3PreTrainedModel",
- "Gemma3TextModel",
- "Gemma3ForCausalLM",
- "Gemma3ForConditionalGeneration",
- "Gemma3Model",
- "Gemma3ForSequenceClassification",
- "Gemma3TextForSequenceClassification"
- ]
- }
- },
- "info.stst.gemma3-text": {
- "*": {
- "repo": "google/gemma-3-12b-it",
- "pkg": {
- "0": {
- "transformers": "Gemma3TextModel"
- }
- },
- "tasks": [
- "Gemma3PreTrainedModel",
- "Gemma3TextModel",
- "Gemma3ForCausalLM",
- "Gemma3ForConditionalGeneration",
- "Gemma3Model",
- "Gemma3ForSequenceClassification",
- "Gemma3TextForSequenceClassification"
- ]
- }
- },
- "info.vit.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nModel"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.art.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nAudioEncoder"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.stst.gemma-3n-e": {
- "*": {
- "repo": "google/gemma-3n-E4B",
- "pkg": {
- "0": {
- "transformers": "Gemma3nTextModel"
- }
- },
- "tasks": [
- "Gemma3nAudioEncoder",
- "Gemma3nForCausalLM",
- "Gemma3nForConditionalGeneration",
- "Gemma3nModel",
- "Gemma3nPreTrainedModel",
- "Gemma3nTextModel"
- ]
- }
- },
- "info.vit.git": {
- "*": {
- "repo": "microsoft/git-base",
- "pkg": {
- "0": {
- "transformers": "GitModel"
- }
- },
- "tasks": [
- "GitForCausalLM",
- "GitModel",
- "GitPreTrainedModel",
- "GitVisionModel"
- ]
- }
- },
- "info.stst.glm-4-chat": {
- "*": {
- "repo": "zai-org/glm-4-9b-chat",
- "pkg": {
- "0": {
- "transformers": "GlmModel"
- }
- },
- "tasks": [
- "GlmPreTrainedModel",
- "GlmModel",
- "GlmForCausalLM",
- "GlmForSequenceClassification",
- "GlmForTokenClassification"
- ]
- }
- },
- "info.stst.glm-4-0414": {
- "*": {
- "repo": "zai-org/GLM-4-9B-0414",
- "pkg": {
- "0": {
- "transformers": "Glm4Model"
- }
- },
- "tasks": [
- "Glm4PreTrainedModel",
- "Glm4Model",
- "Glm4ForCausalLM",
- "Glm4ForSequenceClassification",
- "Glm4ForTokenClassification"
- ]
- }
- },
- "info.vit.glm-4v-thinking": {
- "*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
- "pkg": {
- "0": {
- "transformers": "Glm46VModel"
- }
- },
- "tasks": [
- "Glm46VModel",
- "Glm46VPreTrainedModel",
- "Glm46VForConditionalGeneration"
- ]
- }
- },
- "info.moe.glm-4-a": {
- "*": {
- "repo": "zai-org/GLM-4.5-Air",
- "pkg": {
- "0": {
- "transformers": "Glm4MoeModel"
- }
- },
- "tasks": [
- "Glm4MoePreTrainedModel",
- "Glm4MoeModel",
- "Glm4MoeForCausalLM"
- ]
- }
- },
- "info.vit.glm-4v": {
- "*": {
- "repo": "zai-org/GLM-4.5V",
- "pkg": {
- "0": {
- "transformers": "Glm4vMoeModel"
- }
- },
- "tasks": [
- "Glm4vMoeForConditionalGeneration",
- "Glm4vMoeModel",
- "Glm4vMoePreTrainedModel",
- "Glm4vMoeTextModel",
- "Glm4vMoeVisionModel"
- ]
- }
- },
- "info.moe.glm-4v": {
- "*": {
- "repo": "zai-org/GLM-4.5V",
- "pkg": {
- "0": {
- "transformers": "Glm4vMoeTextModel"
- }
- },
- "tasks": [
- "Glm4vMoeForConditionalGeneration",
- "Glm4vMoeModel",
- "Glm4vMoePreTrainedModel",
- "Glm4vMoeTextModel",
- "Glm4vMoeVisionModel"
- ]
- }
- },
- "info.stst.glm-4v-thinking": {
- "*": {
- "repo": "zai-org/GLM-4.1V-9B-Thinking",
- "pkg": {
- "0": {
- "transformers": "Glm4vTextModel"
- }
- },
- "tasks": [
- "Glm4vForConditionalGeneration",
- "Glm4vModel",
- "Glm4vPreTrainedModel",
- "Glm4vTextModel",
- "Glm4vVisionModel"
- ]
- }
- },
- "info.stst.glm-asr-nano-2512": {
- "*": {
- "repo": "zai-org/GLM-ASR-Nano-2512",
- "pkg": {
- "0": {
- "transformers": "GlmAsrForConditionalGeneration"
- }
- },
- "tasks": [
- "GlmAsrEncoder",
- "GlmAsrForConditionalGeneration",
- "GlmAsrPreTrainedModel"
- ]
- }
- },
- "info.vit.glpn-kitti": {
- "*": {
- "repo": "vinvino02/glpn-kitti",
- "pkg": {
- "0": {
- "transformers": "GLPNModel"
- }
- },
- "tasks": [
- "GLPNForDepthEstimation",
- "GLPNLayer",
- "GLPNModel",
- "GLPNPreTrainedModel"
- ]
- }
- },
- "info.vit.got-ocr-2-hf": {
- "*": {
- "repo": "stepfun-ai/GOT-OCR-2.0-hf",
- "pkg": {
- "0": {
- "transformers": "GotOcr2Model"
- }
- },
- "tasks": [
- "GotOcr2PreTrainedModel",
- "GotOcr2Model",
- "GotOcr2ForConditionalGeneration"
- ]
- }
- },
- "info.art.gpt2": {
- "*": {
- "repo": "openai-community/gpt2",
- "pkg": {
- "0": {
- "transformers": "GPT2Model"
- }
- },
- "tasks": [
- "GPT2DoubleHeadsModel",
- "GPT2ForQuestionAnswering",
- "GPT2ForSequenceClassification",
- "GPT2ForTokenClassification",
- "GPT2LMHeadModel",
- "GPT2Model",
- "GPT2PreTrainedModel"
- ]
- }
- },
- "info.art.gpt-bigcode-santacoder": {
- "*": {
- "repo": "bigcode/gpt_bigcode-santacoder",
- "pkg": {
- "0": {
- "transformers": "GPTBigCodeModel"
- }
- },
- "tasks": [
- "GPTBigCodeForSequenceClassification",
- "GPTBigCodeForTokenClassification",
- "GPTBigCodeForCausalLM",
- "GPTBigCodeModel",
- "GPTBigCodePreTrainedModel"
- ]
- }
- },
- "info.art.gpt-neo": {
- "*": {
- "repo": "EleutherAI/gpt-neo-1.3B",
- "pkg": {
- "0": {
- "transformers": "GPTNeoModel"
- }
- },
- "tasks": [
- "GPTNeoForCausalLM",
- "GPTNeoForQuestionAnswering",
- "GPTNeoForSequenceClassification",
- "GPTNeoForTokenClassification",
- "GPTNeoModel",
- "GPTNeoPreTrainedModel"
- ]
- }
- },
- "info.stst.gpt-neox": {
- "*": {
- "repo": "EleutherAI/gpt-neox-20b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXModel"
- }
- },
- "tasks": [
- "GPTNeoXForCausalLM",
- "GPTNeoXForQuestionAnswering",
- "GPTNeoXForSequenceClassification",
- "GPTNeoXForTokenClassification",
- "GPTNeoXLayer",
- "GPTNeoXModel",
- "GPTNeoXPreTrainedModel"
- ]
- }
- },
- "info.stst.gpt-neox-japanese": {
- "*": {
- "repo": "abeja/gpt-neox-japanese-2.7b",
- "pkg": {
- "0": {
- "transformers": "GPTNeoXJapaneseModel"
- }
- },
- "tasks": [
- "GPTNeoXJapaneseForCausalLM",
- "GPTNeoXJapaneseLayer",
- "GPTNeoXJapaneseModel",
- "GPTNeoXJapanesePreTrainedModel"
- ]
- }
- },
- "info.moe.gpt-oss": {
- "*": {
- "repo": "openai/gpt-oss-120b",
- "pkg": {
- "0": {
- "transformers": "GptOssModel"
- }
- },
- "file_256": [
- "68a8dc1f8e2e5996cb702f14332a25ddf3463daeab2df68e21ca09ef181203c3",
- "a881aa5f561b26a22b14a8262aa61849ace349ffd73d74769e030ac90a1fcf8a"
- ],
- "layer_b3": [
- "b52807536902cabbf84f99e4fa2f8713fb4ef77e739f06367ee0d486e3222faa",
- "43c618018db1fd6e915dead610652da261d9058b73bc5355c85c6ac69af4d913",
- "ab27ce7391b7fbd6ce3c319faa119afdac68f746af6a0ce2c3400a132f36f6ac"
- ],
- "layer_256": [
- "de5dcad822be5ed6196f0f3f6965739993118d14db97b33a94a269f4f1b7a363",
- "575f1977ed42d95a050e13dadaafc05a6d94c8aadca8364dca8a62aa4f2b146c"
- ],
- "tasks": [
- "GptOssForCausalLM",
- "GptOssForSequenceClassification",
- "GptOssForTokenClassification",
- "GptOssModel",
- "GptOssPreTrainedModel"
- ]
- }
- },
- "info.art.gpt-j": {
- "*": {
- "repo": "EleutherAI/gpt-j-6B",
- "pkg": {
- "0": {
- "transformers": "GPTJModel"
- }
- },
- "tasks": [
- "GPTJForCausalLM",
- "GPTJForQuestionAnswering",
- "GPTJForSequenceClassification",
- "GPTJModel",
- "GPTJPreTrainedModel"
- ]
- }
- },
- "info.stst.granite": {
- "*": {
- "repo": "ibm-granite/granite-3.3-2b-base",
- "pkg": {
- "0": {
- "transformers": "GraniteModel"
- }
- },
- "tasks": [
- "GraniteForCausalLM",
- "GraniteModel",
- "GranitePreTrainedModel"
- ]
- }
- },
- "info.moe.powermoe": {
- "*": {
- "repo": "ibm-research/PowerMoE-3b",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeModel"
- }
- },
- "tasks": [
- "GraniteMoeForCausalLM",
- "GraniteMoeModel",
- "GraniteMoePreTrainedModel"
- ]
- }
- },
- "info.ssm.granite-4-h": {
- "*": {
- "repo": "ibm-granite/granite-4.0-h-small",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeHybridModel"
- }
- },
- "tasks": [
- "GraniteMoeHybridForCausalLM",
- "GraniteMoeHybridModel",
- "GraniteMoeHybridPreTrainedModel"
- ]
- }
- },
- "info.moe.moe-active-shared-experts": {
- "*": {
- "repo": "ibm-research/moe-7b-1b-active-shared-experts",
- "pkg": {
- "0": {
- "transformers": "GraniteMoeSharedModel"
- }
- },
- "tasks": [
- "GraniteMoeSharedForCausalLM",
- "GraniteMoeSharedModel",
- "GraniteMoeSharedPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-v1-mistral-hf": {
- "*": {
- "repo": "llava-hf/llava-v1.6-mistral-7b-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextModel"
- }
- },
- "tasks": [
- "LlavaNextForConditionalGeneration",
- "LlavaNextPreTrainedModel",
- "LlavaNextModel"
- ]
- }
- },
- "info.detr.grounding-dino": {
- "*": {
- "repo": "IDEA-Research/grounding-dino-tiny",
- "pkg": {
- "0": {
- "transformers": "GroundingDinoModel"
- }
- },
- "tasks": [
- "GroundingDinoForObjectDetection",
- "GroundingDinoModel",
- "GroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.vit.groupvit-gcc-yfcc": {
- "*": {
- "repo": "nvidia/groupvit-gcc-yfcc",
- "pkg": {
- "0": {
- "transformers": "GroupViTModel"
- }
- },
- "tasks": [
- "GroupViTModel",
- "GroupViTPreTrainedModel",
- "GroupViTTextModel",
- "GroupViTVisionModel"
- ]
- }
- },
- "info.stst.helium": {
- "*": {
- "repo": "kyutai/helium-1-2b",
- "pkg": {
- "0": {
- "transformers": "HeliumModel"
- }
- },
- "tasks": [
- "HeliumPreTrainedModel",
- "HeliumModel",
- "HeliumForCausalLM",
- "HeliumForSequenceClassification",
- "HeliumForTokenClassification"
- ]
- }
- },
- "info.vit.dfine-x-coco": {
- "*": {
- "repo": "ustc-community/dfine_x_coco",
- "pkg": {
- "0": {
- "transformers": "HGNetV2Backbone"
- }
- },
- "tasks": [
- "HGNetV2Backbone",
- "HGNetV2PreTrainedModel",
- "HGNetV2ForImageClassification"
- ]
- }
- },
- "info.vit.hiera-224": {
- "*": {
- "repo": "facebook/hiera-base-224-hf",
- "pkg": {
- "0": {
- "transformers": "HieraModel"
- }
- },
- "tasks": [
- "HieraForImageClassification",
- "HieraForPreTraining",
- "HieraBackbone",
- "HieraModel",
- "HieraPreTrainedModel"
- ]
- }
- },
- "info.aet.hubert-ls960": {
- "*": {
- "repo": "facebook/hubert-base-ls960",
- "pkg": {
- "0": {
- "transformers": "HubertModel"
- }
- },
- "tasks": [
- "HubertForCTC",
- "HubertForSequenceClassification",
- "HubertModel",
- "HubertPreTrainedModel"
- ]
- }
- },
- "info.stst.hunyuan": {
- "*": {
- "repo": "tencent/Hunyuan-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "HunYuanDenseV1Model"
- }
- },
- "tasks": [
- "HunYuanDenseV1ForCausalLM",
- "HunYuanDenseV1Model",
- "HunYuanDenseV1PreTrainedModel",
- "HunYuanDenseV1ForSequenceClassification"
- ]
- }
- },
- "info.moe.hunyuan-a": {
- "*": {
- "repo": "tencent/Hunyuan-A13B-Instruct",
- "pkg": {
- "0": {
- "transformers": "HunYuanMoEV1Model"
- }
- },
- "tasks": [
- "HunYuanMoEV1ForCausalLM",
- "HunYuanMoEV1Model",
- "HunYuanMoEV1PreTrainedModel",
- "HunYuanMoEV1ForSequenceClassification"
- ]
- }
- },
- "info.art.ibert-roberta": {
- "*": {
- "repo": "kssteven/ibert-roberta-base",
- "pkg": {
- "0": {
- "transformers": "IBertModel"
- }
- },
- "tasks": [
- "IBertForMaskedLM",
- "IBertForMultipleChoice",
- "IBertForQuestionAnswering",
- "IBertForSequenceClassification",
- "IBertForTokenClassification",
- "IBertModel",
- "IBertPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics": {
- "*": {
- "repo": "HuggingFaceM4/idefics-9b",
- "pkg": {
- "0": {
- "transformers": "IdeficsModel"
- }
- },
- "tasks": [
- "IdeficsForVisionText2Text",
- "IdeficsModel",
- "IdeficsPreTrainedModel"
- ]
- }
- },
- "info.vit.idefics2": {
- "*": {
- "repo": "HuggingFaceM4/idefics2-8b",
- "pkg": {
- "0": {
- "transformers": "Idefics2Model"
- }
- },
- "tasks": [
- "Idefics2ForConditionalGeneration",
- "Idefics2PreTrainedModel",
- "Idefics2Model"
- ]
- }
- },
- "info.vit.idefics3-llama3": {
- "*": {
- "repo": "HuggingFaceM4/Idefics3-8B-Llama3",
- "pkg": {
- "0": {
- "transformers": "Idefics3Model"
- }
- },
- "tasks": [
- "Idefics3ForConditionalGeneration",
- "Idefics3PreTrainedModel",
- "Idefics3Model",
- "Idefics3VisionTransformer"
- ]
- }
- },
- "info.vit.siglip-patch16-224": {
- "*": {
- "repo": "google/siglip-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Idefics3VisionTransformer"
- }
- },
- "tasks": [
- "Idefics3ForConditionalGeneration",
- "Idefics3PreTrainedModel",
- "Idefics3Model",
- "Idefics3VisionTransformer"
- ]
- }
- },
- "info.vit.ijepa-vith14": {
- "*": {
- "repo": "facebook/ijepa_vith14_1k",
- "pkg": {
- "0": {
- "transformers": "IJepaModel"
- }
- },
- "tasks": [
- "IJepaPreTrainedModel",
- "IJepaModel",
- "IJepaForImageClassification"
- ]
- }
- },
- "info.art.imagegpt": {
- "*": {
- "repo": "openai/imagegpt-small",
- "pkg": {
- "0": {
- "transformers": "ImageGPTModel"
- }
- },
- "tasks": [
- "ImageGPTForCausalImageModeling",
- "ImageGPTForImageClassification",
- "ImageGPTModel",
- "ImageGPTPreTrainedModel"
- ]
- }
- },
- "info.stst.informer-tourism-monthly": {
- "*": {
- "repo": "huggingface/informer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "InformerModel"
- }
- },
- "tasks": [
- "InformerForPrediction",
- "InformerModel",
- "InformerPreTrainedModel"
- ]
- }
- },
- "info.vit.blip-flan-t5": {
- "*": {
- "repo": "Salesforce/instructblip-flan-t5-xl",
- "pkg": {
- "0": {
- "transformers": "InstructBlipModel"
- }
- },
- "tasks": [
- "InstructBlipQFormerModel",
- "InstructBlipPreTrainedModel",
- "InstructBlipModel",
- "InstructBlipForConditionalGeneration",
- "InstructBlipVisionModel"
- ]
- }
- },
- "info.vit.internvl3-hf": {
- "*": {
- "repo": "OpenGVLab/InternVL3-1B-hf",
- "pkg": {
- "0": {
- "transformers": "InternVLModel"
- }
- },
- "tasks": [
- "InternVLVisionPreTrainedModel",
- "InternVLVisionModel",
- "InternVLPreTrainedModel",
- "InternVLModel",
- "InternVLForConditionalGeneration"
- ]
- }
- },
- "info.stst.jais-2-chat": {
- "*": {
- "repo": "inceptionai/Jais-2-8B-Chat",
- "pkg": {
- "0": {
- "transformers": "Jais2Model"
- }
- },
- "tasks": [
- "Jais2Model",
- "Jais2ForCausalLM",
- "Jais2PreTrainedModel"
- ]
- }
- },
- "info.ssm.jamba-v0": {
- "*": {
- "repo": "ai21labs/Jamba-v0.1",
- "pkg": {
- "0": {
- "transformers": "JambaModel"
- }
- },
- "tasks": [
- "JambaForCausalLM",
- "JambaForSequenceClassification",
- "JambaModel",
- "JambaPreTrainedModel"
- ]
- }
- },
- "info.vit.janus": {
- "*": {
- "repo": "deepseek-community/Janus-Pro-1B",
- "pkg": {
- "0": {
- "transformers": "JanusModel"
- }
- },
- "tasks": [
- "JanusPreTrainedModel",
- "JanusForConditionalGeneration",
- "JanusModel",
- "JanusVQVAE",
- "JanusVisionModel"
- ]
- }
- },
- "info.moe.jetmoe": {
- "*": {
- "repo": "jetmoe/jetmoe-8b",
- "pkg": {
- "0": {
- "transformers": "JetMoeModel"
- }
- },
- "tasks": [
- "JetMoeForCausalLM",
- "JetMoeModel",
- "JetMoePreTrainedModel",
- "JetMoeForSequenceClassification"
- ]
- }
- },
- "info.vit.kosmos-2-patch14-224": {
- "*": {
- "repo": "microsoft/kosmos-2-patch14-224",
- "pkg": {
- "0": {
- "transformers": "Kosmos2Model"
- }
- },
- "tasks": [
- "Kosmos2ForConditionalGeneration",
- "Kosmos2Model",
- "Kosmos2PreTrainedModel"
- ]
- }
- },
- "info.vit.kosmos-2": {
- "*": {
- "repo": "microsoft/kosmos-2.5",
- "pkg": {
- "0": {
- "transformers": "Kosmos2_5Model"
- }
- },
- "tasks": [
- "Kosmos2_5ForConditionalGeneration",
- "Kosmos2_5Model",
- "Kosmos2_5PreTrainedModel"
- ]
- }
- },
- "info.stst.stt-en-trfs": {
- "*": {
- "repo": "kyutai/stt-2.6b-en-trfs",
- "pkg": {
- "0": {
- "transformers": "KyutaiSpeechToTextModel"
- }
- },
- "tasks": [
- "KyutaiSpeechToTextPreTrainedModel",
- "KyutaiSpeechToTextModel",
- "KyutaiSpeechToTextForConditionalGeneration"
- ]
- }
- },
- "info.aet.todo": {
- "*": {
- "repo": "TODO/TODO",
- "pkg": {
- "0": {
- "transformers": "LasrForCTC"
- }
- },
- "tasks": [
- "LasrForCTC",
- "LasrEncoder",
- "LasrPreTrainedModel"
- ]
- }
- },
- "info.stst.todo": {
- "*": {
- "repo": "TODO/TODO",
- "pkg": {
- "0": {
- "transformers": "LasrEncoder"
- }
- },
- "tasks": [
- "LasrForCTC",
- "LasrEncoder",
- "LasrPreTrainedModel"
- ]
- }
- },
- "info.art.layoutlm-uncased": {
- "*": {
- "repo": "microsoft/layoutlm-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMModel"
- }
- },
- "tasks": [
- "LayoutLMForMaskedLM",
- "LayoutLMForSequenceClassification",
- "LayoutLMForTokenClassification",
- "LayoutLMForQuestionAnswering",
- "LayoutLMModel",
- "LayoutLMPreTrainedModel"
- ]
- }
- },
- "info.art.layoutlmv2-uncased": {
- "*": {
- "repo": "microsoft/layoutlmv2-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv2Model"
- }
- },
- "tasks": [
- "LayoutLMv2ForQuestionAnswering",
- "LayoutLMv2ForSequenceClassification",
- "LayoutLMv2ForTokenClassification",
- "LayoutLMv2Layer",
- "LayoutLMv2Model",
- "LayoutLMv2PreTrainedModel"
- ]
- }
- },
- "info.vit.layoutlmv3": {
- "*": {
- "repo": "microsoft/layoutlmv3-base",
- "pkg": {
- "0": {
- "transformers": "LayoutLMv3Model"
- }
- },
- "tasks": [
- "LayoutLMv3ForQuestionAnswering",
- "LayoutLMv3ForSequenceClassification",
- "LayoutLMv3ForTokenClassification",
- "LayoutLMv3Model",
- "LayoutLMv3PreTrainedModel"
- ]
- }
- },
- "info.stst.led-16384": {
- "*": {
- "repo": "allenai/led-base-16384",
- "pkg": {
- "0": {
- "transformers": "LEDModel"
- }
- },
- "tasks": [
- "LEDForConditionalGeneration",
- "LEDForQuestionAnswering",
- "LEDForSequenceClassification",
- "LEDModel",
- "LEDPreTrainedModel"
- ]
- }
- },
- "info.gan.levit-128s": {
- "*": {
- "repo": "facebook/levit-128S",
- "pkg": {
- "0": {
- "transformers": "LevitModel"
- }
- },
- "tasks": [
- "LevitForImageClassification",
- "LevitForImageClassificationWithTeacher",
- "LevitModel",
- "LevitPreTrainedModel"
- ]
- }
- },
- "info.stst.lfm": {
- "*": {
- "repo": "LiquidAI/LFM2-1.2B",
- "pkg": {
- "0": {
- "transformers": "Lfm2Model"
- }
- },
- "tasks": [
- "Lfm2ForCausalLM",
- "Lfm2Model",
- "Lfm2PreTrainedModel"
- ]
- }
- },
- "info.moe.lfm2-a": {
- "*": {
- "repo": "LiquidAI/LFM2-8B-A1B",
- "pkg": {
- "0": {
- "transformers": "Lfm2MoeModel"
- }
- },
- "tasks": [
- "Lfm2MoeForCausalLM",
- "Lfm2MoeModel",
- "Lfm2MoePreTrainedModel"
- ]
- }
- },
- "info.vit.lfm2-vl": {
- "*": {
- "repo": "LiquidAI/LFM2-VL-1.6B",
- "pkg": {
- "0": {
- "transformers": "Lfm2VlModel"
- }
- },
- "tasks": [
- "Lfm2VlForConditionalGeneration",
- "Lfm2VlPreTrainedModel",
- "Lfm2VlModel"
- ]
- }
- },
- "info.aet.lightglue-superpoint": {
- "*": {
- "repo": "ETH-CVG/lightglue_superpoint",
- "pkg": {
- "0": {
- "transformers": "LightGlueForKeypointMatching"
- }
- },
- "tasks": [
- "LightGluePreTrainedModel",
- "LightGlueForKeypointMatching"
- ]
- }
- },
- "info.art.lilt-roberta-en": {
- "*": {
- "repo": "SCUT-DLVCLab/lilt-roberta-en-base",
- "pkg": {
- "0": {
- "transformers": "LiltModel"
- }
- },
- "tasks": [
- "LiltForQuestionAnswering",
- "LiltForSequenceClassification",
- "LiltForTokenClassification",
- "LiltModel",
- "LiltPreTrainedModel"
- ]
- }
- },
- "info.vit.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4ForConditionalGeneration"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.moe.llama-4-scout-16e": {
- "*": {
- "repo": "meta-llama/Llama-4-Scout-17B-16E",
- "pkg": {
- "0": {
- "transformers": "Llama4TextModel"
- }
- },
- "tasks": [
- "Llama4PreTrainedModel",
- "Llama4TextModel",
- "Llama4VisionModel",
- "Llama4ForCausalLM",
- "Llama4ForConditionalGeneration"
- ]
- }
- },
- "info.vit.llava": {
- "*": {
- "repo": "llava-hf/llava-9b",
- "pkg": {
- "0": {
- "transformers": "LlavaModel"
- }
- },
- "file_256": [
- "f5ad57d3eda300a3195bc9c0bb36ab76ebe88831f128e9851e63440aff4a6741"
- ],
- "layer_b3": [
- "d7d6ccb9dbba90b64e4cd259b6309e56708b3f4fbd6e9f85e9f0410e549133ef"
- ],
- "layer_256": [
- "9969c41152aba689413b7f63888ecdc0c0badad2c2960e689ebc4c0e4a696c73"
- ],
- "tasks": [
- "LlavaForConditionalGeneration",
- "LlavaPreTrainedModel",
- "LlavaModel"
- ]
- }
- },
- "info.vit.llava-next-video-hf": {
- "*": {
- "repo": "llava-hf/LLaVA-NeXT-Video-7B-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaNextVideoModel"
- }
- },
- "tasks": [
- "LlavaNextVideoForConditionalGeneration",
- "LlavaNextVideoModel",
- "LlavaNextVideoPreTrainedModel"
- ]
- }
- },
- "info.vit.llava-onevision-qwen2-ov-hf": {
- "*": {
- "repo": "llava-hf/llava-onevision-qwen2-7b-ov-hf",
- "pkg": {
- "0": {
- "transformers": "LlavaOnevisionModel"
- }
- },
- "tasks": [
- "LlavaOnevisionModel",
- "LlavaOnevisionForConditionalGeneration",
- "LlavaOnevisionPreTrainedModel"
- ]
- }
- },
- "info.stst.longcat-flash-chat": {
- "*": {
- "repo": "meituan-longcat/LongCat-Flash-Chat",
- "pkg": {
- "0": {
- "transformers": "LongcatFlashModel"
- }
- },
- "tasks": [
- "LongcatFlashPreTrainedModel",
- "LongcatFlashModel",
- "LongcatFlashForCausalLM"
- ]
- }
- },
- "info.art.longformer-4096": {
- "*": {
- "repo": "allenai/longformer-base-4096",
- "pkg": {
- "0": {
- "transformers": "LongformerModel"
- }
- },
- "tasks": [
- "LongformerForMaskedLM",
- "LongformerForMultipleChoice",
- "LongformerForQuestionAnswering",
- "LongformerForSequenceClassification",
- "LongformerForTokenClassification",
- "LongformerModel",
- "LongformerPreTrainedModel",
- "LongformerSelfAttention"
- ]
- }
- },
- "info.stst.long-t5-local": {
- "*": {
- "repo": "google/long-t5-local-base",
- "pkg": {
- "0": {
- "transformers": "LongT5Model"
- }
- },
- "tasks": [
- "LongT5EncoderModel",
- "LongT5ForConditionalGeneration",
- "LongT5Model",
- "LongT5PreTrainedModel"
- ]
- }
- },
- "info.art.luke": {
- "*": {
- "repo": "studio-ousia/luke-base",
- "pkg": {
- "0": {
- "transformers": "LukeModel"
- }
- },
- "tasks": [
- "LukeForEntityClassification",
- "LukeForEntityPairClassification",
- "LukeForEntitySpanClassification",
- "LukeForMultipleChoice",
- "LukeForQuestionAnswering",
- "LukeForSequenceClassification",
- "LukeForTokenClassification",
- "LukeForMaskedLM",
- "LukeModel",
- "LukePreTrainedModel"
- ]
- }
- },
- "info.art.lxmert-uncased": {
- "*": {
- "repo": "unc-nlp/lxmert-base-uncased",
- "pkg": {
- "0": {
- "transformers": "LxmertModel"
- }
- },
- "tasks": [
- "LxmertEncoder",
- "LxmertForPreTraining",
- "LxmertForQuestionAnswering",
- "LxmertModel",
- "LxmertPreTrainedModel",
- "LxmertVisualFeatureEncoder",
- "LxmertXLayer"
- ]
- }
- },
- "info.stst.m": {
- "*": {
- "repo": "facebook/m2m100_418M",
- "pkg": {
- "0": {
- "transformers": "M2M100Model"
- }
- },
- "tasks": [
- "M2M100ForConditionalGeneration",
- "M2M100Model",
- "M2M100PreTrainedModel"
- ]
- }
- },
- "info.ssm.mamba": {
- "*": {
- "repo": "state-spaces/mamba-2.8b",
- "pkg": {
- "0": {
- "transformers": "MambaModel"
- }
- },
- "tasks": [
- "MambaForCausalLM",
- "MambaModel",
- "MambaPreTrainedModel",
- "MambaCache"
- ]
- }
- },
- "info.ssm.mamba2": {
- "*": {
- "repo": "AntonV/mamba2-2.7b-hf",
- "pkg": {
- "0": {
- "transformers": "Mamba2Model"
- }
- },
- "tasks": [
- "Mamba2ForCausalLM",
- "Mamba2Model",
- "Mamba2PreTrainedModel"
- ]
- }
- },
- "info.stst.opus-mt-en-de": {
- "*": {
- "repo": "Helsinki-NLP/opus-mt-en-de",
- "pkg": {
- "0": {
- "transformers": "MarianModel"
- }
- },
- "tasks": [
- "MarianForCausalLM",
- "MarianModel",
- "MarianMTModel",
- "MarianPreTrainedModel"
- ]
- }
- },
- "info.art.markuplm": {
- "*": {
- "repo": "microsoft/markuplm-base",
- "pkg": {
- "0": {
- "transformers": "MarkupLMModel"
- }
- },
- "tasks": [
- "MarkupLMForQuestionAnswering",
- "MarkupLMForSequenceClassification",
- "MarkupLMForTokenClassification",
- "MarkupLMModel",
- "MarkupLMPreTrainedModel"
- ]
- }
- },
- "info.detr.mask2former-swin-coco-instance": {
- "*": {
- "repo": "facebook/mask2former-swin-small-coco-instance",
- "pkg": {
- "0": {
- "transformers": "Mask2FormerModel"
- }
- },
- "tasks": [
- "Mask2FormerForUniversalSegmentation",
- "Mask2FormerModel",
- "Mask2FormerPreTrainedModel"
- ]
- }
- },
- "info.detr.maskformer-swin-ade": {
- "*": {
- "repo": "facebook/maskformer-swin-base-ade",
- "pkg": {
- "0": {
- "transformers": "MaskFormerModel"
- }
- },
- "tasks": [
- "MaskFormerForInstanceSegmentation",
- "MaskFormerModel",
- "MaskFormerPreTrainedModel"
- ]
- }
- },
- "info.vit.swin-patch4-window7-224": {
- "*": {
- "repo": "microsoft/swin-tiny-patch4-window7-224",
- "pkg": {
- "0": {
- "transformers": "MaskFormerSwinModel"
- }
- },
- "tasks": [
- "MaskFormerSwinBackbone",
- "MaskFormerSwinModel",
- "MaskFormerSwinPreTrainedModel"
- ]
- }
- },
- "info.stst.mbart-cc25": {
- "*": {
- "repo": "facebook/mbart-large-cc25",
- "pkg": {
- "0": {
- "transformers": "MBartModel"
- }
- },
- "tasks": [
- "MBartForCausalLM",
- "MBartForConditionalGeneration",
- "MBartForQuestionAnswering",
- "MBartForSequenceClassification",
- "MBartModel",
- "MBartPreTrainedModel"
- ]
- }
- },
- "info.art.megatron-bert-uncased": {
- "*": {
- "repo": "nvidia/megatron-bert-uncased-345m",
- "pkg": {
- "0": {
- "transformers": "MegatronBertModel"
- }
- },
- "tasks": [
- "MegatronBertForCausalLM",
- "MegatronBertForMaskedLM",
- "MegatronBertForMultipleChoice",
- "MegatronBertForNextSentencePrediction",
- "MegatronBertForPreTraining",
- "MegatronBertForQuestionAnswering",
- "MegatronBertForSequenceClassification",
- "MegatronBertForTokenClassification",
- "MegatronBertModel",
- "MegatronBertPreTrainedModel"
- ]
- }
- },
- "info.vit.metaclip-2-worldwide-huge-quickgelu": {
- "*": {
- "repo": "facebook/metaclip-2-worldwide-huge-quickgelu",
- "pkg": {
- "0": {
- "transformers": "MetaClip2Model"
- }
- },
- "tasks": [
- "MetaClip2Model",
- "MetaClip2PreTrainedModel",
- "MetaClip2TextModel",
- "MetaClip2TextModelWithProjection",
- "MetaClip2VisionModel",
- "MetaClip2VisionModelWithProjection",
- "MetaClip2ForImageClassification"
- ]
- }
- },
- "info.vit.mgp-str": {
- "*": {
- "repo": "alibaba-damo/mgp-str-base",
- "pkg": {
- "0": {
- "transformers": "MgpstrForSceneTextRecognition"
- }
- },
- "tasks": [
- "MgpstrModel",
- "MgpstrPreTrainedModel",
- "MgpstrForSceneTextRecognition"
- ]
- }
- },
- "info.gan.mimi": {
- "*": {
- "repo": "kyutai/mimi",
- "pkg": {
- "0": {
- "transformers": "MimiModel"
- }
- },
- "tasks": [
- "MimiModel",
- "MimiPreTrainedModel"
- ]
- }
- },
- "info.moe.max-text-01-hf": {
- "*": {
- "repo": "MiniMaxAI/MiniMax-Text-01-hf",
- "pkg": {
- "0": {
- "transformers": "MiniMaxModel"
- }
- },
- "tasks": [
- "MiniMaxPreTrainedModel",
- "MiniMaxModel",
- "MiniMaxForCausalLM",
- "MiniMaxForSequenceClassification",
- "MiniMaxForTokenClassification",
- "MiniMaxForQuestionAnswering"
- ]
- }
- },
- "info.stst.stral-2410": {
- "*": {
- "repo": "mistralai/Ministral-8B-Instruct-2410",
- "pkg": {
- "0": {
- "transformers": "MinistralModel"
- }
- },
- "tasks": [
- "MinistralPreTrainedModel",
- "MinistralModel",
- "MinistralForCausalLM",
- "MinistralForSequenceClassification",
- "MinistralForTokenClassification",
- "MinistralForQuestionAnswering"
- ]
- }
- },
- "info.stst.stral-3-2512": {
- "*": {
- "repo": "mistralai/Ministral-3-8B-Base-2512",
- "pkg": {
- "0": {
- "transformers": "Ministral3Model"
- }
- },
- "tasks": [
- "Ministral3ForCausalLM",
- "Ministral3ForQuestionAnswering",
- "Ministral3Model",
- "Ministral3PreTrainedModel",
- "Ministral3ForSequenceClassification",
- "Ministral3ForTokenClassification"
- ]
- }
- },
- "info.stst.mistral-v0": {
- "*": {
- "repo": "mistralai/Mistral-7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MistralModel"
- }
- },
- "tasks": [
- "MistralForCausalLM",
- "MistralForQuestionAnswering",
- "MistralModel",
- "MistralPreTrainedModel",
- "MistralForSequenceClassification",
- "MistralForTokenClassification"
- ]
- }
- },
- "info.vit.mistral-3-2503": {
- "*": {
- "repo": "mistralai/Mistral-Small-3.1-24B-Instruct-2503",
- "pkg": {
- "0": {
- "transformers": "Mistral3Model"
- }
- },
- "tasks": [
- "Mistral3Model",
- "Mistral3PreTrainedModel",
- "Mistral3ForConditionalGeneration"
- ]
- }
- },
- "info.moe.mixtral-8x": {
- "*": {
- "repo": "mistralai/Mixtral-8x7B-v0.1",
- "pkg": {
- "0": {
- "transformers": "MixtralModel"
- }
- },
- "tasks": [
- "MixtralForCausalLM",
- "MixtralForQuestionAnswering",
- "MixtralModel",
- "MixtralPreTrainedModel",
- "MixtralForSequenceClassification",
- "MixtralForTokenClassification"
- ]
- }
- },
- "info.vit.mlcd-vit-bigg-patch14-336": {
- "*": {
- "repo": "DeepGlint-AI/mlcd-vit-bigG-patch14-336",
- "pkg": {
- "0": {
- "transformers": "MLCDVisionModel"
- }
- },
- "tasks": [
- "MLCDPreTrainedModel",
- "MLCDVisionModel"
- ]
- }
- },
- "info.vit.llama-3-vision": {
- "*": {
- "repo": "meta-llama/Llama-3.2-11B-Vision",
- "pkg": {
- "0": {
- "transformers": "MllamaModel"
- }
- },
- "tasks": [
- "MllamaForConditionalGeneration",
- "MllamaForCausalLM",
- "MllamaTextModel",
- "MllamaVisionModel",
- "MllamaPreTrainedModel",
- "MllamaModel"
- ]
- }
- },
- "info.detr.mm-grounding-dino-o365v1-goldg-v3det": {
- "*": {
- "repo": "openmmlab-community/mm_grounding_dino_tiny_o365v1_goldg_v3det",
- "pkg": {
- "0": {
- "transformers": "MMGroundingDinoModel"
- }
- },
- "tasks": [
- "MMGroundingDinoForObjectDetection",
- "MMGroundingDinoModel",
- "MMGroundingDinoPreTrainedModel"
- ]
- }
- },
- "info.art.mobilebert-uncased": {
- "*": {
- "repo": "google/mobilebert-uncased",
- "pkg": {
- "0": {
- "transformers": "MobileBertModel"
- }
- },
- "tasks": [
- "MobileBertForMaskedLM",
- "MobileBertForMultipleChoice",
- "MobileBertForNextSentencePrediction",
- "MobileBertForPreTraining",
- "MobileBertForQuestionAnswering",
- "MobileBertForSequenceClassification",
- "MobileBertForTokenClassification",
- "MobileBertLayer",
- "MobileBertModel",
- "MobileBertPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v1-1--224": {
- "*": {
- "repo": "google/mobilenet_v1_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV1Model"
- }
- },
- "tasks": [
- "MobileNetV1ForImageClassification",
- "MobileNetV1Model",
- "MobileNetV1PreTrainedModel"
- ]
- }
- },
- "info.vit.mobilenet-v2-1--224": {
- "*": {
- "repo": "google/mobilenet_v2_1.0_224",
- "pkg": {
- "0": {
- "transformers": "MobileNetV2Model"
- }
- },
- "tasks": [
- "MobileNetV2ForImageClassification",
- "MobileNetV2ForSemanticSegmentation",
- "MobileNetV2Model",
- "MobileNetV2PreTrainedModel"
- ]
- }
- },
- "info.vit.mobilevit": {
- "*": {
- "repo": "apple/mobilevit-small",
- "pkg": {
- "0": {
- "transformers": "MobileViTModel"
- }
- },
- "tasks": [
- "MobileViTForImageClassification",
- "MobileViTForSemanticSegmentation",
- "MobileViTModel",
- "MobileViTPreTrainedModel"
- ]
- }
- },
- "info.vit.mobilevitv2-1": {
- "*": {
- "repo": "apple/mobilevitv2-1.0-imagenet1k-256",
- "pkg": {
- "0": {
- "transformers": "MobileViTV2Model"
- }
- },
- "tasks": [
- "MobileViTV2ForImageClassification",
- "MobileViTV2ForSemanticSegmentation",
- "MobileViTV2Model",
- "MobileViTV2PreTrainedModel"
- ]
- }
- },
- "info.aet.modernbert": {
- "*": {
- "repo": "answerdotai/ModernBERT-base",
- "pkg": {
- "0": {
- "transformers": "ModernBertModel"
- }
- },
- "tasks": [
- "ModernBertModel",
- "ModernBertPreTrainedModel",
- "ModernBertForMaskedLM",
- "ModernBertForSequenceClassification",
- "ModernBertForTokenClassification",
- "ModernBertForQuestionAnswering",
- "ModernBertForMultipleChoice"
- ]
- }
- },
- "info.aet.test-dec": {
- "*": {
- "repo": "blab-jhu/test-32m-dec",
- "pkg": {
- "0": {
- "transformers": "ModernBertDecoderModel"
- }
- },
- "tasks": [
- "ModernBertDecoderModel",
- "ModernBertDecoderPreTrainedModel",
- "ModernBertDecoderForCausalLM",
- "ModernBertDecoderForSequenceClassification"
- ]
- }
- },
- "info.stst.moonshine": {
- "*": {
- "repo": "UsefulSensors/moonshine-tiny",
- "pkg": {
- "0": {
- "transformers": "MoonshineModel"
- }
- },
- "tasks": [
- "MoonshineModel",
- "MoonshinePreTrainedModel",
- "MoonshineForConditionalGeneration"
- ]
- }
- },
- "info.stst.hf-moshiko": {
- "*": {
- "repo": "kmhf/hf-moshiko",
- "pkg": {
- "0": {
- "transformers": "MoshiModel"
- }
- },
- "tasks": [
- "MoshiForCausalLM",
- "MoshiForConditionalGeneration",
- "MoshiModel",
- "MoshiPreTrainedModel"
- ]
- }
- },
- "info.art.mpnet": {
- "*": {
- "repo": "microsoft/mpnet-base",
- "pkg": {
- "0": {
- "transformers": "MPNetModel"
- }
- },
- "tasks": [
- "MPNetForMaskedLM",
- "MPNetForMultipleChoice",
- "MPNetForQuestionAnswering",
- "MPNetForSequenceClassification",
- "MPNetForTokenClassification",
- "MPNetLayer",
- "MPNetModel",
- "MPNetPreTrainedModel"
- ]
- }
- },
- "info.art.mpt": {
- "*": {
- "repo": "mosaicml/mpt-7b",
- "pkg": {
- "0": {
- "transformers": "MptModel"
- }
- },
- "tasks": [
- "MptForCausalLM",
- "MptModel",
- "MptPreTrainedModel",
- "MptForSequenceClassification",
- "MptForTokenClassification",
- "MptForQuestionAnswering"
- ]
- }
- },
- "info.art.mra-512-4": {
- "*": {
- "repo": "uw-madison/mra-base-512-4",
- "pkg": {
- "0": {
- "transformers": "MraModel"
- }
- },
- "tasks": [
- "MraForMaskedLM",
- "MraForMultipleChoice",
- "MraForQuestionAnswering",
- "MraForSequenceClassification",
- "MraForTokenClassification",
- "MraLayer",
- "MraModel",
- "MraPreTrainedModel"
- ]
- }
- },
- "info.stst.mt5": {
- "*": {
- "repo": "google/mt5-small",
- "pkg": {
- "0": {
- "transformers": "MT5Model"
- }
- },
- "identifiers": [
- [
- 250112,
- 2048
- ],
- "text_encoders.mt5xl.transformer.shared.weight"
- ],
- "file_256": [
- "0524484ec81425ba9deef6fac1393a78ba9b1c9bfed704a4be5f9c7255975cc1",
- "32f70f1d187e131a5fc3e4f0edc97ce89360d8e2f1d90177a443a05296097acc"
- ],
- "layer_b3": [
- "a1d616c37711ec7b9073d04734af2f5fd02f9035a322eb46efeace922e104c51"
- ],
- "layer_256": [
- "bd337daf0c1aa36896013109b406a0580aa3bb8ab9291d89df3015d737358e95",
- "2e40c48c96fc7df636aad96d3e78ed0ba9f68c3059e21b7fcf917f284c569a61"
- ],
- "tasks": [
- "MT5EncoderModel",
- "MT5ForConditionalGeneration",
- "MT5ForQuestionAnswering",
- "MT5ForSequenceClassification",
- "MT5ForTokenClassification",
- "MT5Model",
- "MT5PreTrainedModel"
- ]
- }
- },
- "info.art.musicgen": {
- "*": {
- "repo": "facebook/musicgen-small",
- "pkg": {
- "0": {
- "transformers": "MusicgenModel"
- }
- },
- "tasks": [
- "MusicgenForConditionalGeneration",
- "MusicgenForCausalLM",
- "MusicgenModel",
- "MusicgenPreTrainedModel"
- ]
- }
- },
- "info.art.musicgen-melody": {
- "*": {
- "repo": "facebook/musicgen-melody",
- "pkg": {
- "0": {
- "transformers": "MusicgenMelodyModel"
- }
- },
- "tasks": [
- "MusicgenMelodyForConditionalGeneration",
- "MusicgenMelodyForCausalLM",
- "MusicgenMelodyModel",
- "MusicgenMelodyPreTrainedModel"
- ]
- }
- },
- "info.stst.mvp": {
- "*": {
- "repo": "RUCAIBox/mvp",
- "pkg": {
- "0": {
- "transformers": "MvpModel"
- }
- },
- "tasks": [
- "MvpForCausalLM",
- "MvpForConditionalGeneration",
- "MvpForQuestionAnswering",
- "MvpForSequenceClassification",
- "MvpModel",
- "MvpPreTrainedModel"
- ]
- }
- },
- "info.stst.nanochat-d32": {
- "*": {
- "repo": "karpathy/nanochat-d32",
- "pkg": {
- "0": {
- "transformers": "NanoChatModel"
- }
- },
- "tasks": [
- "NanoChatPreTrainedModel",
- "NanoChatModel",
- "NanoChatForCausalLM"
- ]
- }
- },
- "info.stst.nemotron-3-hf": {
- "*": {
- "repo": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
- "pkg": {
- "0": {
- "transformers": "NemotronModel"
- }
- },
- "tasks": [
- "NemotronForQuestionAnswering",
- "NemotronForCausalLM",
- "NemotronModel",
- "NemotronPreTrainedModel",
- "NemotronForSequenceClassification",
- "NemotronForTokenClassification"
- ]
- }
- },
- "info.moe.nllb-moe": {
- "*": {
- "repo": "facebook/nllb-moe-54b",
- "pkg": {
- "0": {
- "transformers": "NllbMoeModel"
- }
- },
- "tasks": [
- "NllbMoeForConditionalGeneration",
- "NllbMoeModel",
- "NllbMoePreTrainedModel",
- "NllbMoeTop2Router",
- "NllbMoeSparseMLP"
- ]
- }
- },
- "info.art.nystromformer-512": {
- "*": {
- "repo": "uw-madison/nystromformer-512",
- "pkg": {
- "0": {
- "transformers": "NystromformerModel"
- }
- },
- "tasks": [
- "NystromformerForMaskedLM",
- "NystromformerForMultipleChoice",
- "NystromformerForQuestionAnswering",
- "NystromformerForSequenceClassification",
- "NystromformerForTokenClassification",
- "NystromformerLayer",
- "NystromformerModel",
- "NystromformerPreTrainedModel"
- ]
- }
- },
- "info.stst.olmo-hf": {
- "*": {
- "repo": "allenai/OLMo-7B-hf",
- "pkg": {
- "0": {
- "transformers": "OlmoModel"
- }
- },
- "tasks": [
- "OlmoForCausalLM",
- "OlmoModel",
- "OlmoPreTrainedModel"
- ]
- }
- },
- "info.stst.olmo2-1124-hf": {
- "*": {
- "repo": "allenai/Olmo-2-1124-7B",
- "pkg": {
- "0": {
- "transformers": "Olmo2Model"
- }
- },
- "tasks": [
- "Olmo2ForCausalLM",
- "Olmo2Model",
- "Olmo2PreTrainedModel"
- ]
- }
- },
- "info.stst.olmo-3-0725": {
- "*": {
- "repo": "allenai/OLMo-3-0725-1B",
- "pkg": {
- "0": {
- "transformers": "Olmo3Model"
- }
- },
- "tasks": [
- "Olmo3ForCausalLM",
- "Olmo3Model",
- "Olmo3PreTrainedModel"
- ]
- }
- },
- "info.moe.olmoe-0924": {
- "*": {
- "repo": "allenai/OLMoE-1B-7B-0924",
- "pkg": {
- "0": {
- "transformers": "OlmoeModel"
- }
- },
- "tasks": [
- "OlmoeForCausalLM",
- "OlmoeModel",
- "OlmoePreTrainedModel"
- ]
- }
- },
- "info.detr.omdet-turbo-swin-hf": {
- "*": {
- "repo": "omlab/omdet-turbo-swin-tiny-hf",
- "pkg": {
- "0": {
- "transformers": "OmDetTurboForObjectDetection"
- }
- },
- "tasks": [
- "OmDetTurboForObjectDetection",
- "OmDetTurboPreTrainedModel"
- ]
- }
- },
- "info.detr.oneformer-ade-swin": {
- "*": {
- "repo": "shi-labs/oneformer_ade20k_swin_tiny",
- "pkg": {
- "0": {
- "transformers": "OneFormerModel"
- }
- },
- "tasks": [
- "OneFormerForUniversalSegmentation",
- "OneFormerModel",
- "OneFormerPreTrainedModel"
- ]
- }
- },
- "info.art.openai-gpt": {
- "*": {
- "repo": "openai-community/openai-gpt",
- "pkg": {
- "0": {
- "transformers": "OpenAIGPTModel"
- }
- },
- "tasks": [
- "OpenAIGPTDoubleHeadsModel",
- "OpenAIGPTForSequenceClassification",
- "OpenAIGPTLMHeadModel",
- "OpenAIGPTModel",
- "OpenAIGPTPreTrainedModel"
- ]
- }
- },
- "info.art.opt": {
- "*": {
- "repo": "facebook/opt-350m",
- "pkg": {
- "0": {
- "transformers": "OPTModel"
- }
- },
- "tasks": [
- "OPTForCausalLM",
- "OPTModel",
- "OPTPreTrainedModel",
- "OPTForSequenceClassification",
- "OPTForQuestionAnswering"
- ]
- }
- },
- "info.vit.ovis2-hf": {
- "*": {
- "repo": "thisisiron/Ovis2-1B-hf",
- "pkg": {
- "0": {
- "transformers": "Ovis2Model"
- }
- },
- "tasks": [
- "Ovis2PreTrainedModel",
- "Ovis2Model",
- "Ovis2ForConditionalGeneration"
- ]
- }
- },
- "info.vit.owlv2-patch16": {
- "*": {
- "repo": "google/owlv2-base-patch16",
- "pkg": {
- "0": {
- "transformers": "Owlv2Model"
- }
- },
- "tasks": [
- "Owlv2Model",
- "Owlv2PreTrainedModel",
- "Owlv2TextModel",
- "Owlv2VisionModel",
- "Owlv2ForObjectDetection"
- ]
- }
- },
- "info.vit.owlvit-patch32": {
- "*": {
- "repo": "google/owlvit-base-patch32",
- "pkg": {
- "0": {
- "transformers": "OwlViTModel"
- }
- },
- "tasks": [
- "OwlViTModel",
- "OwlViTPreTrainedModel",
- "OwlViTTextModel",
- "OwlViTVisionModel",
- "OwlViTForObjectDetection"
- ]
- }
- },
- "info.vit.paligemma": {
- "*": {
- "repo": "google/paligemma2-3b-mix-224",
- "pkg": {
- "0": {
- "transformers": "PaliGemmaModel"
- }
- },
- "tasks": [
- "PaliGemmaForConditionalGeneration",
- "PaliGemmaPreTrainedModel",
- "PaliGemmaModel"
- ]
- }
- },
- "info.aet.parakeet-ctc-b": {
- "*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
- "pkg": {
- "0": {
- "transformers": "ParakeetForCTC"
- }
- },
- "tasks": [
- "ParakeetForCTC",
- "ParakeetEncoder",
- "ParakeetPreTrainedModel"
- ]
- }
- },
- "info.stst.parakeet-ctc-b": {
- "*": {
- "repo": "nvidia/parakeet-ctc-1.1b",
- "pkg": {
- "0": {
- "transformers": "ParakeetEncoder"
- }
- },
- "tasks": [
- "ParakeetForCTC",
- "ParakeetEncoder",
- "ParakeetPreTrainedModel"
- ]
- }
- },
- "info.mlp.patchtsmixer-etth1-pretrain": {
- "*": {
- "repo": "ibm/patchtsmixer-etth1-pretrain",
- "pkg": {
- "0": {
- "transformers": "PatchTSMixerModel"
- }
- },
- "tasks": [
- "PatchTSMixerPreTrainedModel",
- "PatchTSMixerModel",
- "PatchTSMixerForPretraining",
- "PatchTSMixerForPrediction",
- "PatchTSMixerForTimeSeriesClassification",
- "PatchTSMixerForRegression"
- ]
- }
- },
- "info.art.patchtst": {
- "*": {
- "repo": "ibm/patchtst",
- "pkg": {
- "0": {
- "transformers": "PatchTSTModel"
- }
- },
- "tasks": [
- "PatchTSTModel",
- "PatchTSTPreTrainedModel",
- "PatchTSTForPrediction",
- "PatchTSTForPretraining",
- "PatchTSTForRegression",
- "PatchTSTForClassification"
- ]
- }
- },
- "info.stst.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeAudioModel"
- }
- },
- "tasks": [
- "PeAudioFrameLevelModel",
- "PeAudioModel",
- "PeAudioEncoder"
- ]
- }
- },
- "info.aet.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeAudioVideoModel"
- }
- },
- "tasks": [
- "PeAudioVideoModel",
- "PeAudioVideoEncoder"
- ]
- }
- },
- "info.vit.pe-av": {
- "*": {
- "repo": "facebook/pe-av-large",
- "pkg": {
- "0": {
- "transformers": "PeVideoEncoder"
- }
- },
- "tasks": [
- "PeVideoEncoder",
- "PeVideoModel"
- ]
- }
- },
- "info.stst.pegasus": {
- "*": {
- "repo": "google/pegasus-large",
- "pkg": {
- "0": {
- "transformers": "PegasusModel"
- }
- },
- "tasks": [
- "PegasusForCausalLM",
- "PegasusForConditionalGeneration",
- "PegasusModel",
- "PegasusPreTrainedModel"
- ]
- }
- },
- "info.stst.pegasus-x": {
- "*": {
- "repo": "google/pegasus-x-large",
- "pkg": {
- "0": {
- "transformers": "PegasusXModel"
- }
- },
- "tasks": [
- "PegasusXForConditionalGeneration",
- "PegasusXModel",
- "PegasusXPreTrainedModel"
- ]
- }
- },
- "info.vit.language-perceiver": {
- "*": {
- "repo": "deepmind/language-perceiver",
- "pkg": {
- "0": {
- "transformers": "PerceiverModel"
- }
- },
- "tasks": [
- "PerceiverForImageClassificationConvProcessing",
- "PerceiverForImageClassificationFourier",
- "PerceiverForImageClassificationLearned",
- "PerceiverForMaskedLM",
- "PerceiverForMultimodalAutoencoding",
- "PerceiverForOpticalFlow",
- "PerceiverForSequenceClassification",
- "PerceiverLayer",
- "PerceiverModel",
- "PerceiverPreTrainedModel"
- ]
- }
- },
- "info.vit.perception-lm": {
- "*": {
- "repo": "facebook/Perception-LM-1B",
- "pkg": {
- "0": {
- "transformers": "PerceptionLMModel"
- }
- },
- "tasks": [
- "PerceptionLMForConditionalGeneration",
- "PerceptionLMPreTrainedModel",
- "PerceptionLMModel"
- ]
- }
- },
- "info.stst.persimmon": {
- "*": {
- "repo": "adept/persimmon-8b-base",
- "pkg": {
- "0": {
- "transformers": "PersimmonModel"
- }
- },
- "tasks": [
- "PersimmonForCausalLM",
- "PersimmonModel",
- "PersimmonPreTrainedModel",
- "PersimmonForSequenceClassification",
- "PersimmonForTokenClassification"
- ]
- }
- },
- "info.stst.phi-1": {
- "*": {
- "repo": "microsoft/phi-1",
- "pkg": {
- "0": {
- "transformers": "PhiModel"
- }
- },
- "tasks": [
- "PhiPreTrainedModel",
- "PhiModel",
- "PhiForCausalLM",
- "PhiForSequenceClassification",
- "PhiForTokenClassification"
- ]
- }
- },
- "info.stst.phi-3": {
- "*": {
- "repo": "microsoft/Phi-3-mini-4k-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi3Model"
- }
- },
- "tasks": [
- "Phi3PreTrainedModel",
- "Phi3Model",
- "Phi3ForCausalLM",
- "Phi3ForSequenceClassification",
- "Phi3ForTokenClassification"
- ]
- }
- },
- "info.vit.phi-4": {
- "*": {
- "repo": "microsoft/Phi-4-multimodal-instruct",
- "pkg": {
- "0": {
- "transformers": "Phi4MultimodalModel"
- }
- },
- "file_256": [
- "bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6"
- ],
- "layer_b3": [
- "cf4add4ada6082f448788eaf2937f645b5212db88e06ee81475b8be0e99063dc"
- ],
- "layer_256": [
- "7ff992b780b2f8993dd6bb9612207943638b2a42badc976ce80893bc205e801b"
- ],
- "tasks": [
- "Phi4MultimodalAudioPreTrainedModel",
- "Phi4MultimodalAudioModel",
- "Phi4MultimodalVisionPreTrainedModel",
- "Phi4MultimodalVisionModel",
- "Phi4MultimodalPreTrainedModel",
- "Phi4MultimodalModel",
- "Phi4MultimodalForCausalLM"
- ]
- }
- },
- "info.moe.phi-3-moe": {
- "*": {
- "repo": "microsoft/Phi-3.5-MoE-instruct",
- "pkg": {
- "0": {
- "transformers": "PhimoeModel"
- }
- },
- "tasks": [
- "PhimoePreTrainedModel",
- "PhimoeModel",
- "PhimoeForCausalLM",
- "PhimoeForSequenceClassification"
- ]
- }
- },
- "info.vit.pixio-huge": {
- "*": {
- "repo": "facebook/pixio-huge",
- "pkg": {
- "0": {
- "transformers": "PixioModel"
- }
- },
- "tasks": [
- "PixioModel",
- "PixioPreTrainedModel",
- "PixioBackbone"
- ]
- }
- },
- "info.vit.pixtral": {
- "*": {
- "repo": "mistralai/Pixtral-12B-Base-2409",
- "pkg": {
- "0": {
- "transformers": "PixtralVisionModel"
- }
- },
- "tasks": [
- "PixtralVisionModel",
- "PixtralPreTrainedModel"
- ]
- }
- },
- "info.stst.plbart": {
- "*": {
- "repo": "uclanlp/plbart-base",
- "pkg": {
- "0": {
- "transformers": "PLBartModel"
- }
- },
- "tasks": [
- "PLBartForCausalLM",
- "PLBartForConditionalGeneration",
- "PLBartForSequenceClassification",
- "PLBartModel",
- "PLBartPreTrainedModel"
- ]
- }
- },
- "info.vit.poolformer-s12": {
- "*": {
- "repo": "sail/poolformer_s12",
- "pkg": {
- "0": {
- "transformers": "PoolFormerModel"
- }
- },
- "tasks": [
- "PoolFormerForImageClassification",
- "PoolFormerModel",
- "PoolFormerPreTrainedModel"
- ]
- }
- },
- "info.stst.phetnet-uncased": {
- "*": {
- "repo": "microsoft/prophetnet-large-uncased",
- "pkg": {
- "0": {
- "transformers": "ProphetNetModel"
- }
- },
- "tasks": [
- "ProphetNetDecoder",
- "ProphetNetEncoder",
- "ProphetNetForCausalLM",
- "ProphetNetForConditionalGeneration",
- "ProphetNetModel",
- "ProphetNetPreTrainedModel"
- ]
- }
- },
- "info.vit.pvt-224": {
- "*": {
- "repo": "Xrenya/pvt-tiny-224",
- "pkg": {
- "0": {
- "transformers": "PvtModel"
- }
- },
- "tasks": [
- "PvtForImageClassification",
- "PvtModel",
- "PvtPreTrainedModel"
- ]
- }
- },
- "info.vit.pvt-v2-b0": {
- "*": {
- "repo": "OpenGVLab/pvt_v2_b0",
- "pkg": {
- "0": {
- "transformers": "PvtV2Model"
- }
- },
- "tasks": [
- "PvtV2ForImageClassification",
- "PvtV2Model",
- "PvtV2PreTrainedModel",
- "PvtV2Backbone"
- ]
- }
- },
- "info.stst.qwen2": {
- "*": {
- "repo": "Qwen/Qwen2-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2Model"
- }
- },
- "tasks": [
- "Qwen2PreTrainedModel",
- "Qwen2Model",
- "Qwen2ForCausalLM",
- "Qwen2RMSNorm",
- "Qwen2ForSequenceClassification",
- "Qwen2ForTokenClassification",
- "Qwen2ForQuestionAnswering"
- ]
- }
- },
- "info.vit.qwen2-vl": {
- "*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen2_5_VLModel"
- }
- },
- "tasks": [
- "Qwen2_5_VLForConditionalGeneration",
- "Qwen2_5_VLModel",
- "Qwen2_5_VLPreTrainedModel",
- "Qwen2_5_VLTextModel"
- ]
- }
- },
- "info.stst.qwen2-vl": {
- "*": {
- "repo": "Qwen/Qwen2-VL-7B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen2_5_VLTextModel"
- }
- },
- "file_256": [
- "1f48ac458d6fbd0aec53a116065a7ee3f1d34bddde544e25c16a05c9d5392b78",
- "0e85c7111ce849293e97aa09ce1172352ecece023a3ecea7ac8311e326b47f3a",
- "d725335e4ea2399be706469e4b8807716a8fa64bd03468252e9f7acf2415fee4",
- "e10bd9583a77250376d9134cd6b46799029dfa3b4d7989c1050b3ec149cc7cf5"
- ],
- "layer_b3": [
- "e4f681bde70a753f30f83495a2aa340d251bf3d818eb5a1cbe58f85fd6ea0d40",
- "47b062ce8ddb14845fb1a71d2fd88fd52a82e26561ba3eb05be057915a867775",
- "b6386f70b528ffa9e09fdd8db8a7b91a7c462ed97b06963576c6139e25fdcf31",
- "4cd449df9f9004a7e53005583a7e4cfa6de42912f03647d2ea799d489e9c1406"
- ],
- "layer_256": [
- "ed36a4a11c4ebebb10d1e010cb93e2e43fcaf975cd42bb6c9958537593d0d44d",
- "f7f6f64e7b6d7826400a2fc0eef942a47c47bd5914e051ad0c8cd9ff5ff7982b",
- "f341ed0f792cf0570ceb21d3b64ed14bf9875e9fcb90116851364eeed683a6ca",
- "ba031d0da78afe24ae63558ad29b8028244a7bd4750a5615dab9079fe32a5fd7"
- ],
- "tasks": [
- "Qwen2_5_VLForConditionalGeneration",
- "Qwen2_5_VLModel",
- "Qwen2_5_VLPreTrainedModel",
- "Qwen2_5_VLTextModel"
- ]
- }
- },
- "info.aet.qwen2-audio": {
- "*": {
- "repo": "Qwen/Qwen2-Audio-7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2AudioEncoder"
- }
- },
- "tasks": [
- "Qwen2AudioForConditionalGeneration",
- "Qwen2AudioPreTrainedModel",
- "Qwen2AudioEncoder"
- ]
- }
- },
- "info.moe.qwen15-moe-a": {
- "*": {
- "repo": "Qwen/Qwen1.5-MoE-A2.7B",
- "pkg": {
- "0": {
- "transformers": "Qwen2MoeModel"
- }
- },
- "tasks": [
- "Qwen2MoeForCausalLM",
- "Qwen2MoeForQuestionAnswering",
- "Qwen2MoeModel",
- "Qwen2MoePreTrainedModel",
- "Qwen2MoeForSequenceClassification",
- "Qwen2MoeForTokenClassification"
- ]
- }
- },
- "info.stst.qwen3": {
- "*": {
- "repo": "Qwen/Qwen3-8B",
- "pkg": {
- "0": {
- "transformers": "Qwen3Model"
- }
- },
- "tasks": [
- "Qwen3ForCausalLM",
- "Qwen3ForQuestionAnswering",
- "Qwen3PreTrainedModel",
- "Qwen3Model",
- "Qwen3ForSequenceClassification",
- "Qwen3ForTokenClassification"
- ]
- }
- },
- "info.moe.qwen3-a": {
- "*": {
- "repo": "Qwen/Qwen3-30B-A3B",
- "pkg": {
- "0": {
- "transformers": "Qwen3MoeModel"
- }
- },
- "file_256": [
- "c56947057481fb5e7cdf766e442da81717b34addc88bbe8f3728fd25bd03cbae"
- ],
- "layer_b3": [
- "d2d1e0875202f5c9c84c781a2105620250733bd01832f67b2c17bc981d1eb508"
- ],
- "layer_256": [
- "408c01da57c4968b7b0e36d98a74e321153e7aeb058fea63ffd140e323526476"
- ],
- "tasks": [
- "Qwen3MoeForCausalLM",
- "Qwen3MoeForQuestionAnswering",
- "Qwen3MoeModel",
- "Qwen3MoePreTrainedModel",
- "Qwen3MoeForSequenceClassification",
- "Qwen3MoeForTokenClassification"
- ]
- }
- },
- "info.moe.qwen3-next-a": {
- "*": {
- "repo": "Qwen/Qwen3-Next-80B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3NextModel"
- }
- },
- "tasks": [
- "Qwen3NextForCausalLM",
- "Qwen3NextForQuestionAnswering",
- "Qwen3NextModel",
- "Qwen3NextPreTrainedModel",
- "Qwen3NextForSequenceClassification",
- "Qwen3NextForTokenClassification"
- ]
- }
- },
- "info.vit.qwen3-vl": {
- "*": {
- "repo": "Qwen/Qwen3-VL-4B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLModel"
- }
- },
- "tasks": [
- "Qwen3VLVisionModel",
- "Qwen3VLForConditionalGeneration",
- "Qwen3VLModel",
- "Qwen3VLPreTrainedModel",
- "Qwen3VLTextModel"
- ]
- }
- },
- "info.vit.qwen3-vl-a": {
- "*": {
- "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLMoeModel"
- }
- },
- "tasks": [
- "Qwen3VLMoeVisionModel",
- "Qwen3VLMoeForConditionalGeneration",
- "Qwen3VLMoeModel",
- "Qwen3VLMoePreTrainedModel",
- "Qwen3VLMoeTextModel"
- ]
- }
- },
- "info.moe.qwen3-vl-a": {
- "*": {
- "repo": "Qwen/Qwen3-VL-30B-A3B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLMoeTextModel"
- }
- },
- "tasks": [
- "Qwen3VLMoeVisionModel",
- "Qwen3VLMoeForConditionalGeneration",
- "Qwen3VLMoeModel",
- "Qwen3VLMoePreTrainedModel",
- "Qwen3VLMoeTextModel"
- ]
- }
- },
- "info.stst.qwen3-vl": {
- "*": {
- "repo": "Qwen/Qwen3-VL-4B-Instruct",
- "pkg": {
- "0": {
- "transformers": "Qwen3VLTextModel"
- }
- },
- "tasks": [
- "Qwen3VLVisionModel",
- "Qwen3VLForConditionalGeneration",
- "Qwen3VLModel",
- "Qwen3VLPreTrainedModel",
- "Qwen3VLTextModel"
- ]
- }
- },
- "info.rnn.recurrentgemma": {
- "*": {
- "repo": "google/recurrentgemma-2b",
- "pkg": {
- "0": {
- "transformers": "RecurrentGemmaModel"
- }
- },
- "tasks": [
- "RecurrentGemmaForCausalLM",
- "RecurrentGemmaModel",
- "RecurrentGemmaPreTrainedModel"
- ]
- }
- },
- "info.art.reformer-crime-and-punishment": {
- "*": {
- "repo": "google/reformer-crime-and-punishment",
- "pkg": {
- "0": {
- "transformers": "ReformerModel"
- }
- },
- "tasks": [
- "ReformerAttention",
- "ReformerForMaskedLM",
- "ReformerForQuestionAnswering",
- "ReformerForSequenceClassification",
- "ReformerLayer",
- "ReformerModel",
- "ReformerModelWithLMHead",
- "ReformerPreTrainedModel"
- ]
- }
- },
- "info.vit.regnet-y-040": {
- "*": {
- "repo": "facebook/regnet-y-040",
- "pkg": {
- "0": {
- "transformers": "RegNetModel"
- }
- },
- "tasks": [
- "RegNetForImageClassification",
- "RegNetModel",
- "RegNetPreTrainedModel"
- ]
- }
- },
- "info.art.rembert": {
- "*": {
- "repo": "google/rembert",
- "pkg": {
- "0": {
- "transformers": "RemBertModel"
- }
- },
- "tasks": [
- "RemBertForCausalLM",
- "RemBertForMaskedLM",
- "RemBertForMultipleChoice",
- "RemBertForQuestionAnswering",
- "RemBertForSequenceClassification",
- "RemBertForTokenClassification",
- "RemBertLayer",
- "RemBertModel",
- "RemBertPreTrainedModel"
- ]
- }
- },
- "info.vit.resnet-50": {
- "*": {
- "repo": "microsoft/resnet-50",
- "pkg": {
- "0": {
- "transformers": "ResNetModel"
- }
- },
- "tasks": [
- "ResNetForImageClassification",
- "ResNetModel",
- "ResNetPreTrainedModel",
- "ResNetBackbone"
- ]
- }
- },
- "info.art.roberta": {
- "*": {
- "repo": "FacebookAI/roberta-base",
- "pkg": {
- "0": {
- "transformers": "RobertaModel"
- }
- },
- "tasks": [
- "RobertaForCausalLM",
- "RobertaForMaskedLM",
- "RobertaForMultipleChoice",
- "RobertaForQuestionAnswering",
- "RobertaForSequenceClassification",
- "RobertaForTokenClassification",
- "RobertaModel",
- "RobertaPreTrainedModel"
- ]
- }
- },
- "info.art.efficient-mlm-m0-0": {
- "*": {
- "repo": "andreasmadsen/efficient_mlm_m0.40",
- "pkg": {
- "0": {
- "transformers": "RobertaPreLayerNormModel"
- }
- },
- "tasks": [
- "RobertaPreLayerNormForCausalLM",
- "RobertaPreLayerNormForMaskedLM",
- "RobertaPreLayerNormForMultipleChoice",
- "RobertaPreLayerNormForQuestionAnswering",
- "RobertaPreLayerNormForSequenceClassification",
- "RobertaPreLayerNormForTokenClassification",
- "RobertaPreLayerNormModel",
- "RobertaPreLayerNormPreTrainedModel"
- ]
- }
- },
- "info.art.roc-bert-zh": {
- "*": {
- "repo": "weiweishi/roc-bert-base-zh",
- "pkg": {
- "0": {
- "transformers": "RoCBertModel"
- }
- },
- "tasks": [
- "RoCBertForCausalLM",
- "RoCBertForMaskedLM",
- "RoCBertForMultipleChoice",
- "RoCBertForPreTraining",
- "RoCBertForQuestionAnswering",
- "RoCBertForSequenceClassification",
- "RoCBertForTokenClassification",
- "RoCBertLayer",
- "RoCBertModel",
- "RoCBertPreTrainedModel"
- ]
- }
- },
- "info.art.roformer-chinese": {
- "*": {
- "repo": "junnyu/roformer_chinese_base",
- "pkg": {
- "0": {
- "transformers": "RoFormerModel"
- }
- },
- "tasks": [
- "RoFormerForCausalLM",
- "RoFormerForMaskedLM",
- "RoFormerForMultipleChoice",
- "RoFormerForQuestionAnswering",
- "RoFormerForSequenceClassification",
- "RoFormerForTokenClassification",
- "RoFormerLayer",
- "RoFormerModel",
- "RoFormerPreTrainedModel"
- ]
- }
- },
- "info.detr.rtdetr-r50vd": {
- "*": {
- "repo": "PekingU/rtdetr_r50vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrModel"
- }
- },
- "tasks": [
- "RTDetrForObjectDetection",
- "RTDetrModel",
- "RTDetrPreTrainedModel"
- ]
- }
- },
- "info.detr.rtdetr-r18vd": {
- "*": {
- "repo": "PekingU/rtdetr_r18vd",
- "pkg": {
- "0": {
- "transformers": "RTDetrV2Model"
- }
- },
- "tasks": [
- "RTDetrV2Model",
- "RTDetrV2PreTrainedModel",
- "RTDetrV2ForObjectDetection"
- ]
- }
- },
- "info.rnn.rwkv-4-pile": {
- "*": {
- "repo": "RWKV/rwkv-4-169m-pile",
- "pkg": {
- "0": {
- "transformers": "RwkvModel"
- }
- },
- "tasks": [
- "RwkvForCausalLM",
- "RwkvModel",
- "RwkvPreTrainedModel"
- ]
- }
- },
- "info.vit.sam-vit-huge": {
- "*": {
- "repo": "facebook/sam-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamModel"
- }
- },
- "tasks": [
- "SamVisionModel",
- "SamModel",
- "SamPreTrainedModel"
- ]
- }
- },
- "info.vit.sam2-hiera": {
- "*": {
- "repo": "facebook/sam2.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "Sam2Model"
- }
- },
- "tasks": [
- "Sam2Model",
- "Sam2VisionModel",
- "Sam2PreTrainedModel",
- "Sam2HieraDetModel"
- ]
- }
- },
- "info.vit.sam3": {
- "*": {
- "repo": "facebook/sam3",
- "pkg": {
- "0": {
- "transformers": "Sam3Model"
- }
- },
- "tasks": [
- "Sam3Model",
- "Sam3VisionModel",
- "Sam3ViTModel",
- "Sam3PreTrainedModel"
- ]
- }
- },
- "info.vit.sam3-tracker1-hiera": {
- "*": {
- "repo": "facebook/sam3_tracker.1-hiera-tiny",
- "pkg": {
- "0": {
- "transformers": "Sam3TrackerModel"
- }
- },
- "tasks": [
- "Sam3TrackerModel",
- "Sam3TrackerPreTrainedModel"
- ]
- }
- },
- "info.stst.sam3": {
- "*": {
- "repo": "facebook/sam3",
- "pkg": {
- "0": {
- "transformers": "Sam3VideoModel"
- }
- },
- "tasks": [
- "Sam3VideoModel",
- "Sam3VideoPreTrainedModel",
- "Sam3VideoInferenceSession",
- "Sam3VideoSegmentationOutput"
- ]
- }
- },
- "info.vit.sam-hq-vit-h": {
- "*": {
- "repo": "sushmanth/sam_hq_vit_h",
- "pkg": {
- "0": {
- "transformers": "SamHQModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.vit.sam-hq-vit-huge": {
- "*": {
- "repo": "syscv-community/sam-hq-vit-huge",
- "pkg": {
- "0": {
- "transformers": "SamHQVisionModel"
- }
- },
- "tasks": [
- "SamHQModel",
- "SamHQPreTrainedModel",
- "SamHQVisionModel"
- ]
- }
- },
- "info.aet.hf-seamless-m4t": {
- "*": {
- "repo": "facebook/hf-seamless-m4t-medium",
- "pkg": {
- "0": {
- "transformers": "SeamlessM4TModel"
- }
- },
- "tasks": [
- "SeamlessM4TForTextToSpeech",
- "SeamlessM4TForSpeechToSpeech",
- "SeamlessM4TForTextToText",
- "SeamlessM4TForSpeechToText",
- "SeamlessM4TModel",
- "SeamlessM4TPreTrainedModel",
- "SeamlessM4TCodeHifiGan",
- "SeamlessM4THifiGan",
- "SeamlessM4TTextToUnitForConditionalGeneration",
- "SeamlessM4TTextToUnitModel"
- ]
- }
- },
- "info.stst.seamless-m4t-v2": {
- "*": {
- "repo": "facebook/seamless-m4t-v2-large",
- "pkg": {
- "0": {
- "transformers": "SeamlessM4Tv2Model"
- }
- },
- "tasks": [
- "SeamlessM4Tv2ForTextToSpeech",
- "SeamlessM4Tv2ForSpeechToSpeech",
- "SeamlessM4Tv2ForTextToText",
- "SeamlessM4Tv2ForSpeechToText",
- "SeamlessM4Tv2Model",
- "SeamlessM4Tv2PreTrainedModel"
- ]
- }
- },
- "info.stst.seedoss": {
- "*": {
- "repo": "ByteDance-Seed/SeedOss-36B",
- "pkg": {
- "0": {
- "transformers": "SeedOssModel"
- }
- },
- "tasks": [
- "SeedOssForCausalLM",
- "SeedOssForQuestionAnswering",
- "SeedOssPreTrainedModel",
- "SeedOssModel",
- "SeedOssForSequenceClassification",
- "SeedOssForTokenClassification"
- ]
- }
- },
- "info.vit.segformer-b0-finetuned-ade-512-512": {
- "*": {
- "repo": "nvidia/segformer-b0-finetuned-ade-512-512",
- "pkg": {
- "0": {
- "transformers": "SegformerModel"
- }
- },
- "tasks": [
- "SegformerDecodeHead",
- "SegformerForImageClassification",
- "SegformerForSemanticSegmentation",
- "SegformerLayer",
- "SegformerModel",
- "SegformerPreTrainedModel"
- ]
- }
- },
- "info.vit.seggpt-vit": {
- "*": {
- "repo": "BAAI/seggpt-vit-large",
- "pkg": {
- "0": {
- "transformers": "SegGptModel"
- }
- },
- "tasks": [
- "SegGptModel",
- "SegGptPreTrainedModel",
- "SegGptForImageSegmentation"
- ]
- }
- },
- "info.aet.sew": {
- "*": {
- "repo": "asapp/sew-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWModel"
- }
- },
- "tasks": [
- "SEWForCTC",
- "SEWForSequenceClassification",
- "SEWModel",
- "SEWPreTrainedModel"
- ]
- }
- },
- "info.aet.sew-d": {
- "*": {
- "repo": "asapp/sew-d-tiny-100k",
- "pkg": {
- "0": {
- "transformers": "SEWDModel"
- }
- },
- "tasks": [
- "SEWDForCTC",
- "SEWDForSequenceClassification",
- "SEWDModel",
- "SEWDPreTrainedModel"
- ]
- }
- },
- "info.vit.siglip2-patch16-224": {
- "*": {
- "repo": "google/siglip2-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "Siglip2Model"
- }
- },
- "tasks": [
- "Siglip2Model",
- "Siglip2PreTrainedModel",
- "Siglip2TextModel",
- "Siglip2VisionModel",
- "Siglip2ForImageClassification"
- ]
- }
- },
- "info.vit.siglip2-patch16-naflex": {
- "*": {
- "repo": "google/siglip2-base-patch16-naflex",
- "pkg": {
- "0": {
- "transformers": "Siglip2VisionModel"
- }
- },
- "tasks": [
- "Siglip2Model",
- "Siglip2PreTrainedModel",
- "Siglip2TextModel",
- "Siglip2VisionModel",
- "Siglip2ForImageClassification"
- ]
- }
- },
- "info.stst.smollm3": {
- "*": {
- "repo": "HuggingFaceTB/SmolLM3-3B",
- "pkg": {
- "0": {
- "transformers": "SmolLM3Model"
- }
- },
- "tasks": [
- "SmolLM3PreTrainedModel",
- "SmolLM3Model",
- "SmolLM3ForCausalLM",
- "SmolLM3ForSequenceClassification",
- "SmolLM3ForTokenClassification",
- "SmolLM3ForQuestionAnswering"
- ]
- }
- },
- "info.vit.smolvlm": {
- "*": {
- "repo": "HuggingFaceTB/SmolVLM2-2.2B-Instruct",
- "pkg": {
- "0": {
- "transformers": "SmolVLMModel"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.vit.siglip-so-patch14-384": {
- "*": {
- "repo": "google/siglip-so400m-patch14-384",
- "pkg": {
- "0": {
- "transformers": "SmolVLMVisionTransformer"
- }
- },
- "tasks": [
- "SmolVLMForConditionalGeneration",
- "SmolVLMPreTrainedModel",
- "SmolVLMModel",
- "SmolVLMVisionTransformer"
- ]
- }
- },
- "info.aet.s2t-librispeech-asr": {
- "*": {
- "repo": "facebook/s2t-small-librispeech-asr",
- "pkg": {
- "0": {
- "transformers": "Speech2TextModel"
- }
- },
- "tasks": [
- "Speech2TextForConditionalGeneration",
- "Speech2TextModel",
- "Speech2TextPreTrainedModel"
- ]
- }
- },
- "info.stst.speecht5-asr": {
- "*": {
- "repo": "microsoft/speecht5_asr",
- "pkg": {
- "0": {
- "transformers": "SpeechT5Model"
- }
- },
- "tasks": [
- "SpeechT5ForSpeechToText",
- "SpeechT5ForSpeechToSpeech",
- "SpeechT5ForTextToSpeech",
- "SpeechT5Model",
- "SpeechT5PreTrainedModel",
- "SpeechT5HifiGan"
- ]
- }
- },
- "info.art.splinter": {
- "*": {
- "repo": "tau/splinter-base",
- "pkg": {
- "0": {
- "transformers": "SplinterModel"
- }
- },
- "tasks": [
- "SplinterForQuestionAnswering",
- "SplinterForPreTraining",
- "SplinterLayer",
- "SplinterModel",
- "SplinterPreTrainedModel"
- ]
- }
- },
- "info.art.squeezebert-uncased": {
- "*": {
- "repo": "squeezebert/squeezebert-uncased",
- "pkg": {
- "0": {
- "transformers": "SqueezeBertModel"
- }
- },
- "tasks": [
- "SqueezeBertForMaskedLM",
- "SqueezeBertForMultipleChoice",
- "SqueezeBertForQuestionAnswering",
- "SqueezeBertForSequenceClassification",
- "SqueezeBertForTokenClassification",
- "SqueezeBertModel",
- "SqueezeBertModule",
- "SqueezeBertPreTrainedModel"
- ]
- }
- },
- "info.stst.stablelm-4e1t": {
- "*": {
- "repo": "stabilityai/stablelm-3b-4e1t",
- "pkg": {
- "0": {
- "transformers": "StableLmModel"
- }
- },
- "tasks": [
- "StableLmForCausalLM",
- "StableLmModel",
- "StableLmPreTrainedModel",
- "StableLmForSequenceClassification",
- "StableLmForTokenClassification"
- ]
- }
- },
- "info.stst.starcoder2": {
- "*": {
- "repo": "bigcode/starcoder2-7b",
- "pkg": {
- "0": {
- "transformers": "Starcoder2Model"
- }
- },
- "tasks": [
- "Starcoder2ForCausalLM",
- "Starcoder2Model",
- "Starcoder2PreTrainedModel",
- "Starcoder2ForSequenceClassification",
- "Starcoder2ForTokenClassification"
- ]
- }
- },
- "info.vit.swiftformer-xs": {
- "*": {
- "repo": "MBZUAI/swiftformer-xs",
- "pkg": {
- "0": {
- "transformers": "SwiftFormerModel"
- }
- },
- "tasks": [
- "SwiftFormerForImageClassification",
- "SwiftFormerModel",
- "SwiftFormerPreTrainedModel"
- ]
- }
- },
- "info.vit.swin2sr-classicalsr-x2-64": {
- "*": {
- "repo": "caidas/swin2sr-classicalsr-x2-64",
- "pkg": {
- "0": {
- "transformers": "Swin2SRModel"
- }
- },
- "tasks": [
- "Swin2SRForImageSuperResolution",
- "Swin2SRModel",
- "Swin2SRPreTrainedModel"
- ]
- }
- },
- "info.vit.swinv2-patch4-window8-256": {
- "*": {
- "repo": "microsoft/swinv2-tiny-patch4-window8-256",
- "pkg": {
- "0": {
- "transformers": "Swinv2Model"
- }
- },
- "tasks": [
- "Swinv2ForImageClassification",
- "Swinv2ForMaskedImageModeling",
- "Swinv2Model",
- "Swinv2PreTrainedModel",
- "Swinv2Backbone"
- ]
- }
- },
- "info.moe.switch-8": {
- "*": {
- "repo": "google/switch-base-8",
- "pkg": {
- "0": {
- "transformers": "SwitchTransformersModel"
- }
- },
- "tasks": [
- "SwitchTransformersEncoderModel",
- "SwitchTransformersForConditionalGeneration",
- "SwitchTransformersModel",
- "SwitchTransformersPreTrainedModel",
- "SwitchTransformersTop1Router",
- "SwitchTransformersSparseMLP"
- ]
- }
- },
- "info.stst.t5": {
- "*": {
- "repo": "google-t5/t5-small",
- "pkg": {
- "0": {
- "transformers": "T5Model"
- }
- },
- "identifiers": [
- [
- 4096
- ],
- "encoder.embed_tokens.weight",
- "text_encoders.t5xxl.transformer.shared.weight",
- "t5xxl",
- "encoder.block.0.layer.1.DenseReluDense.wi.weight"
- ],
- "file_256": [
- "ec87bffd1923e8b2774a6d240c922a41f6143081d52cf83b8fe39e9d838c893e",
- "565cb2487351282e8e4dbeb88e63f4ad28217ce0439f5a8e6525a924807d2d9b",
- "6e480b09fae049a72d2a8c5fbccb8d3e92febeb233bbe9dfe7256958a9167635",
- "4f2751ceeb2a96edd693e539dc5d6bba0b8d3814f49a9b3798403a0cec4b2e3d",
- "83690f3cc37cecb5e907f41ab0f7abb0855ef24a0a8aab9259f2888ce85a34e2",
- "7d330da4816157540d6bb7838bf63a0f02f573fc48ca4d8de34bb0cbfd514f09",
- "8490f7a22615c20651a63dbe7b4241929826a4de20292dc8e63bfc3c61e3654f",
- "d8720addef2596fef86b1b22e4b62875c9118779ba8723759a75dfcbc649ffd5",
- "7d0eac95abe8daae454bcd3d166b8bfc6a35fe68278f97479d62dbb6850f38c0",
- "ceabd6f71c7112cfaa4dfca8711dda97b79fb9b25983f1c95532de226045f1f8",
- "49e139f50824fef40908ef4307c851e7adaa8b91bed44054c4829600dbedfdda",
- "211ade1d474f5dc83190aec8be5c4baf52643777790d64de0cbd84f63613e5e9",
- "7894547154ba3fd6e364e66e2951ee82b4c3fc1ae0f95df6a4f9d1c5a4e98f17",
- "eb529f693f4b17773a24e787fcba29486d5e1700dadcc20bb91e4c8b00212d08",
- "d80116f6fc39801e4eef425a584e7a7a41cbe5119797bef2dad67299909fe2ae",
- "31ebe18e901bfb6e5709a20ec1c95fce29bce2b9545073231e0f909a53239f5c",
- "6be2b0b7e2de7cf2919340c88cb802a103a997ce46c53131cec91958c1db1af4",
- "b51cbb10b1a7aac6dd1c3b62f0ed908bfd06e0b42d2f3577d43e061361f51dae",
- "9ec60f6028534b7fe5af439fcb535d75a68592a9ca3fcdeb175ef89e3ee99825",
- "8f5ab879234384235d56732f0cda07bf8801f30a49645248c5bfdeeb1665f64b",
- "86427a1f4dba48940e45bf78d6db5bf0d48fce8b4656f5aba27955f06af9628e",
- "88b696cfae098f03bb078cc5944ef03aec1e91ec020a6b016b723a0f0532558c",
- "1dc600961d3c5ed081f6700485cdc7ed9cfb4631f2dc385b7ac6bd3c80846d0d",
- "f28631189911f8d7931e8fe642a4cb2a3c51f50da7cabbfa06b89bafc19c00d0",
- "de9dfdd19d7ba6859993cadec5100665dc7a4fb71e1c6c8970959cbdaf4366e3",
- "7a68b2c8c080696a10109612a649bc69330991ecfea65930ccfdfbdb011f2686",
- "2c0c539ab8e8fba3877cc94bc483e427f74c525f817a809b028ebc8d96d75a94"
- ],
- "layer_b3": [
- "ca94e03b7b1fdcb0d6ff5205eac56f145d2dff8a9c489faf80935bfec8387f18",
- "c0e2b054bedd782909191b05748a88c28d1538fa91789fec63f036ba01dcc001",
- "672de9b79d14001de7d1109ffc52e4d0cccc3bfee6f45648fa347703b58e2b99",
- "abdb187a996c51cb0469630c124b14eeb0bb8f5f635aca6c71dea264f8bd61ae",
- "8926f862b7763fd9688af317eba7809aa71a478484be0c738c269de368ace4a7",
- "e616b754cf55e55b3f9f17ab7e1fff95f0607c81782822fc1223ae22fb1e9f36",
- "b79e5f1878a62cd726bb4f9fc1415cacb071d278440e9026290c7b36cb41e1d4",
- "77619d5278d9f547ddac17d4d99df56cb6a3a9e660ae31b2f896a4297907e62e",
- "c87c9d3cc7becc46ee34821299cf8551a6df5541582a45469a031bccdc4bd340",
- "7e6c32c01c89fc5d1610c410135aa9708e77a7444510e5e479fa677ff2b53643",
- "a49c2bc301733967ddff113790e301773dc5dd71368b657af4141458de593ced",
- "c2ea94030ea362e03d73d448fa5353ace0a449dc38c51a4a49fb148444ebb8ef",
- "4a90463350f08ef41479da1d561ab41b8f8b792f1603a092226a838156aebfb0",
- "f86cd0324eebbffb81b15ad47dc8b63fedfa51dc222e44e1a958a7becce2bcb0",
- "48c54c61c5f14e42761c6177539b2da3a22222516dab053952ca8d8e92f93d65",
- "311332d9738773669128814d944b1e860a8e3176b37abf43370bc06b43b454d0",
- "3f4e51dec6d542759cdea49b3bec14c090a4908f953fa3e182e2ea43b5b05402",
- "beb25461e168359108add77263ea5cc121b7584cc4aa304ffc4e134783bb1d88",
- "43313f90a359c8c1c787a7a833b1ab9f7a38204ba36d0ba587c658d0d9bf0852",
- "fa9e97cdad26f55fedab83a3f114e0338c9cca3ea2bf8f1b168a6dfc5919bf8e",
- "93108d67f8829a7e1e8f3773e9ce53c67f365889c2acfd69816ac80fd43f8e08",
- "fc65a6cc55e89394d7bc0fa4ee952d63ce3bdc143b84b5aa4bb3edf7722a6b83",
- "8163bc781a7e013dfeb806bbb828a36913cf119363ea5fcd9071d87a0c227cda",
- "ad2ba63e1134bad1b15ee339313bc130708b2995e8b4b76fb44d727f28c26ad9",
- "4a844772638ffed2f61d45eaac984094b92540fa1391a4098608fc73a6cd4fd8",
- "76c31e1fd35da7de7cee97c1e7c5ccde640e6fac3e17a62e115ecf484c7196c3",
- "a4d672e22b5bdd8f8b0885cec4a173d0466bb1dcbfbf8400cedcc41c2494f16c",
- "d1860c3f01dc9f260d98b50d3d2bbc8dc2d3eefaa93778a8de9d7adfb897fc6e",
- "b8719092fc58487406211f52dc55bf40b573ccfd29933a989c33a36b694f6f0a",
- "795e272409bc4fa55f402485acf86b607256f91aa965295c5bb771c61f8e9e74"
- ],
- "layer_256": [
- "bb20f7805209379aea4d6548f17e551cf27d0f8426ca169e4df8234f718ed5ef",
- "431580c2d86f9a9ed3500f776a4c997223e5644aed211f965354869ccfa4d76e",
- "2ccd548c4ffe34168c60779ebd497b9b410981a2fda813c8723a24a805c94ea0",
- "a608fc4e1cc9762e46187a1ce66e98e8ba4bc3a604cbfd96174bd876baea0fa1",
- "dc9e74cdf535e0b7a17e1335d0d8b38a00f94facf0cb01363baee09945a25278",
- "f07409710a69b2247aa4723a9b40d2225d5e5bfba7b60c51f0ea901fc2ef5ad9",
- "ed28f8b6cc472f352fc840b5a9f841ff17d76ae6918f0676464dca20529aa92b",
- "97c1a08f87c59b4c55ad4672841977cfce43ca7730bcd11d8c178a9330de1855",
- "968972839b859a9c4457f190fad2e17e8585ce27d9ef318df4f5b4e902143944",
- "4dbdeadc957c898c327197a3d8770188535672e9208beb29bbf48dfdf51c8955",
- "669172c2b5e8b97774d9dd0227ede40c4d25cae3adae97d9f281d03531e7e137",
- "39fff130b9ee240102c28a78ee1c4a643e9f800b734ff133f3ab2ad1357bd2f6",
- "6e047ed8cb7007034ff15840dd53c92096f0e7ed5befa07808de8afa35d35874",
- "adbd0baa059074501b7686db2b0c01715f3a317275c2657c5dfbfd6ee92389b7",
- "eb63790fb32b5660de34fa42c2e608df58f7aa3680b4984f0ee9008fe613729c",
- "f125c20a33b0ff2dbd4e8ad9acebc34383cb2ef98668169ef79a8c06655ced35",
- "e64e0ac83a785ef584a0e86b347fae8f9e2bd84324a49396ca8a9fe7532a947b",
- "70001b3ac1b66522142bb86e4c3e87e20c2bbd07276c763878e0838ef6184aad",
- "f46fd1e2b5fef3b9f7ae80d183cc77f7be181117a72a0bb933bdef0bc6cd679e",
- "83676d73726d101325a47c7f8a60cedf10bab99ea79a6bedad7761220cb4a625",
- "a621a907586e5e270e7c7873b167364d8a935ff347d8240fa9bab319678da690",
- "f0af1a089f40d8611db5c59469314f1547e2df23c6eff24860359b37ea9bd966",
- "72478320b8dbfd9aeaea010dcf0896e3116fa5ab940f3b472882d9f9d2d7333f",
- "9c1a88e36334a48d8482fec54b14ea1d5fd31f0dbb65d13cc616e63dc7c42be5",
- "d0689f727e8ac4fef3ec4b1f29e8a3bd12e1116559eeefb2a1a457cd4e676d1e",
- "fea158a4afcfaa6e95e04799bae0287de0c4fcb188f3b41768a46ce48c71c9df",
- "2e5bc4e73312b5aec4c1a55631cb4ed69cf34ccaa6d1f28f7045f137a579b439",
- "015fdecbc3b5369dbcb2302e4b79985437ac4496d1b9ad63316423a222fb0803"
- ],
- "tasks": [
- "T5EncoderModel",
- "T5ForConditionalGeneration",
- "T5Model",
- "T5PreTrainedModel",
- "T5ForQuestionAnswering",
- "T5ForSequenceClassification",
- "T5ForTokenClassification"
- ]
- }
- },
- "info.stst.t5gemma-prefixlm": {
- "*": {
- "repo": "google/t5gemma-2b-2b-prefixlm-it",
- "pkg": {
- "0": {
- "transformers": "T5GemmaModel"
- }
- },
- "tasks": [
- "T5GemmaForConditionalGeneration",
- "T5GemmaModel",
- "T5GemmaEncoderModel",
- "T5GemmaPreTrainedModel",
- "T5GemmaForSequenceClassification",
- "T5GemmaForTokenClassification"
- ]
- }
- },
- "info.stst.t5gemma-2": {
- "*": {
- "repo": "google/t5gemma-2-270m-270m",
- "pkg": {
- "0": {
- "transformers": "T5Gemma2Model"
- }
- },
- "tasks": [
- "T5Gemma2ForConditionalGeneration",
- "T5Gemma2Model",
- "T5Gemma2PreTrainedModel",
- "T5Gemma2ForSequenceClassification",
- "T5Gemma2ForTokenClassification"
- ]
- }
- },
- "info.detr.table-transformer-detection": {
- "*": {
- "repo": "microsoft/table-transformer-detection",
- "pkg": {
- "0": {
- "transformers": "TableTransformerModel"
- }
- },
- "tasks": [
- "TableTransformerForObjectDetection",
- "TableTransformerModel",
- "TableTransformerPreTrainedModel"
- ]
- }
- },
- "info.art.tapas-finetuned-sqa": {
- "*": {
- "repo": "google/tapas-base-finetuned-sqa",
- "pkg": {
- "0": {
- "transformers": "TapasModel"
- }
- },
- "tasks": [
- "TapasForMaskedLM",
- "TapasForQuestionAnswering",
- "TapasForSequenceClassification",
- "TapasModel",
- "TapasPreTrainedModel"
- ]
- }
- },
- "info.vit.textnet": {
- "*": {
- "repo": "czczup/textnet-base",
- "pkg": {
- "0": {
- "transformers": "TextNetModel"
- }
- },
- "tasks": [
- "TextNetBackbone",
- "TextNetModel",
- "TextNetPreTrainedModel",
- "TextNetForImageClassification"
- ]
- }
- },
- "info.stst.time-series-transformer-tourism-monthly": {
- "*": {
- "repo": "huggingface/time-series-transformer-tourism-monthly",
- "pkg": {
- "0": {
- "transformers": "TimeSeriesTransformerModel"
- }
- },
- "tasks": [
- "TimeSeriesTransformerForPrediction",
- "TimeSeriesTransformerModel",
- "TimeSeriesTransformerPreTrainedModel"
- ]
- }
- },
- "info.art.timesfm-2-pytorch": {
- "*": {
- "repo": "google/timesfm-2.0-500m-pytorch",
- "pkg": {
- "0": {
- "transformers": "TimesFmModel"
- }
- },
- "tasks": [
- "TimesFmModelForPrediction",
- "TimesFmPreTrainedModel",
- "TimesFmModel"
- ]
- }
- },
- "info.vit.timesformer-finetuned-k600": {
- "*": {
- "repo": "facebook/timesformer-base-finetuned-k600",
- "pkg": {
- "0": {
- "transformers": "TimesformerModel"
- }
- },
- "tasks": [
- "TimesformerModel",
- "TimesformerForVideoClassification",
- "TimesformerPreTrainedModel"
- ]
- }
- },
- "info.detr.resnet18-a1-in": {
- "*": {
- "repo": "timm/resnet18.a1_in1k",
- "pkg": {
- "0": {
- "transformers": "TimmWrapperModel"
- }
- },
- "tasks": [
- "TimmWrapperPreTrainedModel",
- "TimmWrapperModel",
- "TimmWrapperForImageClassification"
- ]
- }
- },
- "info.detr.tvp": {
- "*": {
- "repo": "Intel/tvp-base",
- "pkg": {
- "0": {
- "transformers": "TvpModel"
- }
- },
- "tasks": [
- "TvpModel",
- "TvpPreTrainedModel",
- "TvpForVideoGrounding"
- ]
- }
- },
- "info.vit.udop": {
- "*": {
- "repo": "microsoft/udop-large",
- "pkg": {
- "0": {
- "transformers": "UdopModel"
- }
- },
- "tasks": [
- "UdopForConditionalGeneration",
- "UdopPreTrainedModel",
- "UdopModel",
- "UdopEncoderModel"
- ]
- }
- },
- "info.stst.umt5": {
- "*": {
- "repo": "google/umt5-small",
- "pkg": {
- "0": {
- "transformers": "UMT5Model"
- }
- },
- "identifiers": [
- "encoder.block.1.layer.0.SelfAttention.relative_attention_bias.weight"
- ],
- "file_256": [
- "a8e861969c7433e707cc5a74065d795d36cca07ec96eb6763eb4083df7248f58",
- "decf9b70814ed5e9965bfca9fbd0483462e2bf743790663025b7742f8c014c72",
- "0a07449cf1141c0ec86e653c00465f6f0d79c6e58a2c60c8bcf4203d0e4ec4f6",
- "c0ef3a140898e228a3520c9adec60743d2e8e5b3d229651bb37f1a3921919f99",
- "7b8850f1961e1cf8a77cca4c964a358d303f490833c6c087d0cff4b2f99db2af",
- "c3355d30191f1f066b26d93fba017ae9809dce6c627dda5f6a66eaa651204f68",
- "fa1d36fd54f171ae60fea915c23bd77986b330bbed9729f0d2f8ecbe9168bc48",
- "4a3176f32fd70c0a335b4419fcbf8c86cc875e23498c0fc06f5b4aa0930889e0",
- "adbc782b9145a27e15d63dfa25057efca0ac75e2db7d372c901ddaa130ca2def",
- "b7e2ca4c493c9d51fa951005e8ceba2f4b6b6877cfb4c36a8955c6cd68a1dba7",
- "2521d4de0bf9e1cc6549866463ceae85e4ec3239bc6063f7488810be39033bbc",
- "9209b4c77b34ad8cf3f06b04c6eaa27e7beeebb348a31f85e3b38a1d719b09ed",
- "8bc12d80bc0413573fa58a93626117440b4528f640dd9cb310732e05fa9e6c3e",
- "f64f8d6dc4d8a24276df69d0ccea789aae686f7417950a41e6568c30cb478a5c",
- "17cf97a5bbbc60a646d6105b832b6f657ce904a8a1ad970e4b59df0c67584a40",
- "eaea358bb438c5d211721a4feecc162000e3636e9cb96f51e216f1f44ebd12ce"
- ],
- "layer_b3": [
- "cd92b29c9099a640e3f5d4a76e64b3467f87f6c056119e0defdff94d311ad6de",
- "1c943dbcb8b328a7c6c852921ddaefbd84c9df8c83bc51fe303c1f06cb734102",
- "1639a6467af0db1e15828d33b878e568cba1335947eeadd481170bcdc9ba8e33",
- "72a0329740dee29a2c099eec3c320b3945590a74293356014c30249fe69652e5",
- "0374cba03c607ffe8ab8f04994d82f82e80901dc7578f1a9a6cb2637608be5d5",
- "d75a407f873e1cfa1a0a36214b53b14bfebe9253ea263465151c07f0d57f3f29",
- "621153502b985c143d304318c91dc3d10296d24268c81e3538fc336fdc84c915",
- "43bb052945d38a68bec27c3d26162e88e306e6074d027d3b4b2b8ae2b1851691",
- "98f50ea5d55e61c1478df47e567e48bdd036d240b9129e64d53a826406900adc",
- "9400313b8eae31699473daa5f840d25a4ef660f68de9a7894f1a28f214f23384",
- "9f13826b8e4ddde24d80de6a947a7868e26cea25dda52790ee6ed695ff72b9bb",
- "475773ab108a537ff904b84e7f3a80129ba4983deb7170b6b52c922ece6069ce",
- "5ef27b3c1eddb08cfe41b452cf9529d86dff811645d40c165bae324486d19e96",
- "e170559d8551cfe651344594e54c0a9a90c0068b00f3866f6e9a3737e20925cb",
- "e8dc7442a20bcdc7b6e5dd0265939d88896eab5ddd33ee16f1f09537e65914b8",
- "4d3d5049857d01741780daf01e96617092973305637b435f4895499a26bbaede",
- "7a2adadc2372feda23b2169337276adda6d1fdef82ba69f0d3321c4c6ba8c604",
- "0a7c61a85bb3f51f75924de48ef3f5e87cbf8901f600cbfcae97f5e2919c4148"
- ],
- "layer_256": [
- "467916d35f3053dce1d40d998fcaf6aa03feda75aa578d964dd61461e23641a3",
- "58deeef888d4ded4ffababfbf8da27227a4a6ff8adfa42016e12c0180f713816",
- "178ebd3fa3418d33a2e45a80d8b9d3662ff4a8e75f3de3f0332f82c505d8152a",
- "8700dcb651465fe6c925b7ad6068b58b32951832fff0ed19819510f8d0713ee5",
- "954f2129ba166e746c71433f717b572d8869ec14b32b7f214d1701d3b1120047",
- "32f5fc1daea014b6488b96c2a1330e0aad87e074844fa3e2e3f20b9e58440395",
- "9245abaf6df8a4b5fcc828ecbcd7b21a1b19bf5f3c4388fb5c8eabc140276dce",
- "172d0fbbd379ae014a7008e148813818494e9e645db802fd000d443369df9d17",
- "2fa68a26b0386aaf9123d2b4067dafc8631ee724602197dd353f3ea5a61dac8a",
- "16f0054014e6d07b86b0526d5bcfed7d2aa3aebe3e44e6758933d90cbd3da46e",
- "fd62047f5d27ff43210c117dc0f253c101e694a5331d6b684688606c92c65ccf",
- "ddc4f38db9f132fb1b736c1d693b5c039a2d6fe83bdf4f1c1e7a2745b5d79124",
- "9e9ab11b3ea059b84ae2bcc5be76ab3f730a486d92a16f1fd2a959bdc2ede08f",
- "bfb178b1ce27f00e122d2328c662fdef6cc239c07efc749aa61ae2d395441b02",
- "50addf6a911b90194a75b0212429d1af55eb2f9d24715479b9ccc4a40adc299b",
- "2e46e9f1b714d72160d3b3b775a845b3049a01396fab935f1278d9e8de2ef0c6",
- "db8d2b49d9042e39d6531b33ec3bebb9cdf42b9e6ad56163f08da2a7da2a53cd",
- "2d81d19ad5440422b85e0b17c71914269f6c25c9b1fa321c0dd6119ddb41d62d"
- ],
- "tasks": [
- "UMT5EncoderModel",
- "UMT5ForConditionalGeneration",
- "UMT5ForQuestionAnswering",
- "UMT5ForSequenceClassification",
- "UMT5ForTokenClassification",
- "UMT5Model",
- "UMT5PreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-1500h-cv": {
- "*": {
- "repo": "microsoft/unispeech-large-1500h-cv",
- "pkg": {
- "0": {
- "transformers": "UniSpeechModel"
- }
- },
- "tasks": [
- "UniSpeechForCTC",
- "UniSpeechForPreTraining",
- "UniSpeechForSequenceClassification",
- "UniSpeechModel",
- "UniSpeechPreTrainedModel"
- ]
- }
- },
- "info.aet.unispeech-sat-100h-libri-ft": {
- "*": {
- "repo": "microsoft/unispeech-sat-base-100h-libri-ft",
- "pkg": {
- "0": {
- "transformers": "UniSpeechSatModel"
- }
- },
- "tasks": [
- "UniSpeechSatForAudioFrameClassification",
- "UniSpeechSatForCTC",
- "UniSpeechSatForPreTraining",
- "UniSpeechSatForSequenceClassification",
- "UniSpeechSatForXVector",
- "UniSpeechSatModel",
- "UniSpeechSatPreTrainedModel"
- ]
- }
- },
- "info.gan.univnet-dev": {
- "*": {
- "repo": "dg845/univnet-dev",
- "pkg": {
- "0": {
- "transformers": "UnivNetModel"
- }
- },
- "tasks": [
- "UnivNetModel"
- ]
- }
- },
- "info.stst.vaultgemma": {
- "*": {
- "repo": "google/vaultgemma-7b",
- "pkg": {
- "0": {
- "transformers": "VaultGemmaModel"
- }
- },
- "tasks": [
- "VaultGemmaForCausalLM",
- "VaultGemmaModel",
- "VaultGemmaPreTrainedModel"
- ]
- }
- },
- "info.vit.videollama3-image-hf": {
- "*": {
- "repo": "lkhl/VideoLLaMA3-2B-Image-HF",
- "pkg": {
- "0": {
- "transformers": "VideoLlama3Model"
- }
- },
- "tasks": [
- "VideoLlama3VisionModel",
- "VideoLlama3PreTrainedModel",
- "VideoLlama3Model",
- "VideoLlama3ForConditionalGeneration"
- ]
- }
- },
- "info.vit.video-llava-hf": {
- "*": {
- "repo": "LanguageBind/Video-LLaVA-7B-hf",
- "pkg": {
- "0": {
- "transformers": "VideoLlavaModel"
- }
- },
- "tasks": [
- "VideoLlavaPreTrainedModel",
- "VideoLlavaModel",
- "VideoLlavaForConditionalGeneration"
- ]
- }
- },
- "info.vit.videomae": {
- "*": {
- "repo": "MCG-NJU/videomae-base",
- "pkg": {
- "0": {
- "transformers": "VideoMAEModel"
- }
- },
- "tasks": [
- "VideoMAEForPreTraining",
- "VideoMAEModel",
- "VideoMAEPreTrainedModel",
- "VideoMAEForVideoClassification"
- ]
- }
- },
- "info.vit.vilt-b32-mlm": {
- "*": {
- "repo": "dandelin/vilt-b32-mlm",
- "pkg": {
- "0": {
- "transformers": "ViltModel"
- }
- },
- "tasks": [
- "ViltForImageAndTextRetrieval",
- "ViltForImagesAndTextClassification",
- "ViltForTokenClassification",
- "ViltForMaskedLM",
- "ViltForQuestionAnswering",
- "ViltLayer",
- "ViltModel",
- "ViltPreTrainedModel"
- ]
- }
- },
- "info.vit.vip-llava-hf": {
- "*": {
- "repo": "ybelkada/vip-llava-7b-hf",
- "pkg": {
- "0": {
- "transformers": "VipLlavaModel"
- }
- },
- "tasks": [
- "VipLlavaModel",
- "VipLlavaForConditionalGeneration",
- "VipLlavaPreTrainedModel"
- ]
- }
- },
- "info.vit.japanese-clip-vit-h-14-bert-wider": {
- "*": {
- "repo": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider",
- "pkg": {
- "0": {
- "transformers": "VisionTextDualEncoderModel"
- }
- },
- "tasks": [
- "VisionTextDualEncoderModel"
- ]
- }
- },
- "info.art.visualbert-vqa-coco-pre": {
- "*": {
- "repo": "uclanlp/visualbert-vqa-coco-pre",
- "pkg": {
- "0": {
- "transformers": "VisualBertModel"
- }
- },
- "tasks": [
- "VisualBertForMultipleChoice",
- "VisualBertForPreTraining",
- "VisualBertForQuestionAnswering",
- "VisualBertForRegionToPhraseAlignment",
- "VisualBertForVisualReasoning",
- "VisualBertLayer",
- "VisualBertModel",
- "VisualBertPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-patch16-224": {
- "*": {
- "repo": "google/vit-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "ViTModel"
- }
- },
- "tasks": [
- "ViTForImageClassification",
- "ViTForMaskedImageModeling",
- "ViTModel",
- "ViTPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-mae": {
- "*": {
- "repo": "facebook/vit-mae-base",
- "pkg": {
- "0": {
- "transformers": "ViTMAEModel"
- }
- },
- "tasks": [
- "ViTMAEForPreTraining",
- "ViTMAELayer",
- "ViTMAEModel",
- "ViTMAEPreTrainedModel"
- ]
- }
- },
- "info.vit.vit-msn": {
- "*": {
- "repo": "facebook/vit-msn-base",
- "pkg": {
- "0": {
- "transformers": "ViTMSNModel"
- }
- },
- "tasks": [
- "ViTMSNModel",
- "ViTMSNForImageClassification",
- "ViTMSNPreTrainedModel"
- ]
- }
- },
- "info.vit.vitdet-patch16-224": {
- "*": {
- "repo": "google/vitdet-base-patch16-224",
- "pkg": {
- "0": {
- "transformers": "VitDetModel"
- }
- },
- "tasks": [
- "VitDetModel",
- "VitDetPreTrainedModel",
- "VitDetBackbone"
- ]
- }
- },
- "info.art.mms-tts-eng": {
- "*": {
- "repo": "facebook/mms-tts-eng",
- "pkg": {
- "0": {
- "transformers": "VitsModel"
- }
- },
- "tasks": [
- "VitsModel",
- "VitsPreTrainedModel"
- ]
- }
- },
- "info.vit.vivit16x2-kinetics400": {
- "*": {
- "repo": "google/vivit-b-16x2-kinetics400",
- "pkg": {
- "0": {
- "transformers": "VivitModel"
- }
- },
- "tasks": [
- "VivitModel",
- "VivitPreTrainedModel",
- "VivitForVideoClassification"
- ]
- }
- },
- "info.vit.vjepa2-vitl-fpc64-256": {
- "*": {
- "repo": "facebook/vjepa2-vitl-fpc64-256",
- "pkg": {
- "0": {
- "transformers": "VJEPA2Model"
- }
- },
- "tasks": [
- "VJEPA2Model",
- "VJEPA2PreTrainedModel",
- "VJEPA2ForVideoClassification"
- ]
- }
- },
- "info.stst.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralForConditionalGeneration"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.aet.voxtral-2507": {
- "*": {
- "repo": "mistralai/Voxtral-Mini-3B-2507",
- "pkg": {
- "0": {
- "transformers": "VoxtralEncoder"
- }
- },
- "tasks": [
- "VoxtralPreTrainedModel",
- "VoxtralEncoder",
- "VoxtralForConditionalGeneration"
- ]
- }
- },
- "info.aet.wav2vec2-960h": {
- "*": {
- "repo": "facebook/wav2vec2-base-960h",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2Model"
- }
- },
- "tasks": [
- "Wav2Vec2ForAudioFrameClassification",
- "Wav2Vec2ForCTC",
- "Wav2Vec2ForMaskedLM",
- "Wav2Vec2ForPreTraining",
- "Wav2Vec2ForSequenceClassification",
- "Wav2Vec2ForXVector",
- "Wav2Vec2Model",
- "Wav2Vec2PreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-bert-rel-pos": {
- "*": {
- "repo": "facebook/w2v-bert-2.0",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2BertModel"
- }
- },
- "tasks": [
- "Wav2Vec2BertForAudioFrameClassification",
- "Wav2Vec2BertForCTC",
- "Wav2Vec2BertForSequenceClassification",
- "Wav2Vec2BertForXVector",
- "Wav2Vec2BertModel",
- "Wav2Vec2BertPreTrainedModel"
- ]
- }
- },
- "info.aet.wav2vec2-conformer-rel-pos": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rel-pos-large",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerModel"
- }
- },
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.aet.wavlm": {
- "*": {
- "repo": "microsoft/wavlm-base",
- "pkg": {
- "0": {
- "transformers": "WavLMModel"
- }
- },
- "tasks": [
- "WavLMForAudioFrameClassification",
- "WavLMForCTC",
- "WavLMForSequenceClassification",
- "WavLMForXVector",
- "WavLMModel",
- "WavLMPreTrainedModel"
- ]
- }
- },
- "info.aet.whisper": {
- "*": {
- "repo": "openai/whisper-tiny",
- "pkg": {
- "0": {
- "transformers": "WhisperModel"
- }
- },
- "tasks": [
- "WhisperForCausalLM",
- "WhisperForConditionalGeneration",
- "WhisperModel",
- "WhisperPreTrainedModel",
- "WhisperForAudioClassification"
- ]
- }
- },
- "info.vit.xclip-patch32": {
- "*": {
- "repo": "microsoft/xclip-base-patch32",
- "pkg": {
- "0": {
- "transformers": "XCLIPModel"
- }
- },
- "tasks": [
- "XCLIPModel",
- "XCLIPPreTrainedModel",
- "XCLIPTextModel",
- "XCLIPVisionModel"
- ]
- }
- },
- "info.gan.x-codec": {
- "*": {
- "repo": "Manel/X-Codec",
- "pkg": {
- "0": {
- "transformers": "XcodecModel"
- }
- },
- "tasks": [
- "XcodecModel",
- "XcodecPreTrainedModel"
- ]
- }
- },
- "info.art.xglm": {
- "*": {
- "repo": "facebook/xglm-564M",
- "pkg": {
- "0": {
- "transformers": "XGLMModel"
- }
- },
- "tasks": [
- "XGLMForCausalLM",
- "XGLMModel",
- "XGLMPreTrainedModel"
- ]
- }
- },
- "info.art.xlm-mlm-en-2048": {
- "*": {
- "repo": "FacebookAI/xlm-mlm-en-2048",
- "pkg": {
- "0": {
- "transformers": "XLMModel"
- }
- },
- "tasks": [
- "XLMForMultipleChoice",
- "XLMForQuestionAnswering",
- "XLMForQuestionAnsweringSimple",
- "XLMForSequenceClassification",
- "XLMForTokenClassification",
- "XLMModel",
- "XLMPreTrainedModel",
- "XLMWithLMHeadModel"
- ]
- }
- },
- "info.art.xlm-roberta": {
- "*": {
- "repo": "FacebookAI/xlm-roberta-base",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaModel"
- }
- },
- "tasks": [
- "XLMRobertaForCausalLM",
- "XLMRobertaForMaskedLM",
- "XLMRobertaForMultipleChoice",
- "XLMRobertaForQuestionAnswering",
- "XLMRobertaForSequenceClassification",
- "XLMRobertaForTokenClassification",
- "XLMRobertaModel",
- "XLMRobertaPreTrainedModel"
- ]
- }
- },
- "info.art.xlm-roberta-xl": {
- "*": {
- "repo": "facebook/xlm-roberta-xl",
- "pkg": {
- "0": {
- "transformers": "XLMRobertaXLModel"
- }
- },
- "tasks": [
- "XLMRobertaXLForCausalLM",
- "XLMRobertaXLForMaskedLM",
- "XLMRobertaXLForMultipleChoice",
- "XLMRobertaXLForQuestionAnswering",
- "XLMRobertaXLForSequenceClassification",
- "XLMRobertaXLForTokenClassification",
- "XLMRobertaXLModel",
- "XLMRobertaXLPreTrainedModel"
- ]
- }
- },
- "info.art.xlnet-cased": {
- "*": {
- "repo": "xlnet/xlnet-large-cased",
- "pkg": {
- "0": {
- "transformers": "XLNetModel"
- }
- },
- "tasks": [
- "XLNetForMultipleChoice",
- "XLNetForQuestionAnswering",
- "XLNetForQuestionAnsweringSimple",
- "XLNetForSequenceClassification",
- "XLNetForTokenClassification",
- "XLNetLMHeadModel",
- "XLNetModel",
- "XLNetPreTrainedModel"
- ]
- }
- },
- "info.lstm.xlstm": {
- "*": {
- "repo": "NX-AI/xLSTM-7b",
- "pkg": {
- "0": {
- "transformers": "xLSTMModel"
- }
- },
- "tasks": [
- "xLSTMForCausalLM",
- "xLSTMModel",
- "xLSTMPreTrainedModel"
- ]
- }
- },
- "info.art.xmod": {
- "*": {
- "repo": "facebook/xmod-base",
- "pkg": {
- "0": {
- "transformers": "XmodModel"
- }
- },
- "tasks": [
- "XmodForCausalLM",
- "XmodForMaskedLM",
- "XmodForMultipleChoice",
- "XmodForQuestionAnswering",
- "XmodForSequenceClassification",
- "XmodForTokenClassification",
- "XmodModel",
- "XmodPreTrainedModel"
- ]
- }
- },
- "info.cnn.yolos": {
- "*": {
- "repo": "hustvl/yolos-base",
- "pkg": {
- "0": {
- "transformers": "YolosModel"
- }
- },
- "tasks": [
- "YolosForObjectDetection",
- "YolosModel",
- "YolosPreTrainedModel"
- ]
- }
- },
- "info.art.yoso-4096": {
- "*": {
- "repo": "uw-madison/yoso-4096",
- "pkg": {
- "0": {
- "transformers": "YosoModel"
- }
- },
- "tasks": [
- "YosoForMaskedLM",
- "YosoForMultipleChoice",
- "YosoForQuestionAnswering",
- "YosoForSequenceClassification",
- "YosoForTokenClassification",
- "YosoLayer",
- "YosoModel",
- "YosoPreTrainedModel"
- ]
- }
- },
- "info.ssm.zamba-v1": {
- "*": {
- "repo": "Zyphra/Zamba-7B-v1",
- "pkg": {
- "0": {
- "transformers": "ZambaModel"
- }
- },
- "tasks": [
- "ZambaForCausalLM",
- "ZambaForSequenceClassification",
- "ZambaModel",
- "ZambaPreTrainedModel"
- ]
- }
- },
- "info.ssm.zamba2": {
- "*": {
- "repo": "Zyphra/Zamba2-2.7B",
- "pkg": {
- "0": {
- "transformers": "Zamba2Model"
- }
- },
- "tasks": [
- "Zamba2ForCausalLM",
- "Zamba2ForSequenceClassification",
- "Zamba2Model",
- "Zamba2PreTrainedModel"
- ]
- }
- },
- "ops.precision.uint": {
- "U8": {
- "pkg": {
- "0": {
- "torch": {
- "uint8": {
- "variant": "uint8"
- }
- }
- }
- }
- },
- "U16": {
- "pkg": {
- "0": {
- "torch": {
- "uint16": {
- "variant": "uint16"
- }
- }
- }
- }
- },
- "U32": {
- "pkg": {
- "0": {
- "torch": {
- "uint32": {
- "variant": "uint32"
- }
- }
- }
- }
- },
- "U64": {
- "pkg": {
- "0": {
- "torch": {
- "uint64": {
- "variant": "uint64"
- }
- }
- }
- }
- },
- "U1": {
- "pkg": {
- "0": {
- "torch": {
- "uint1": {
- "variant": "uint1"
- }
- }
- }
- }
- },
- "U2": {
- "pkg": {
- "0": {
- "torch": {
- "uint2": {
- "variant": "uint2"
- }
- }
- }
- }
- },
- "U3": {
- "pkg": {
- "0": {
- "torch": {
- "uint3": {
- "variant": "uint3"
- }
- }
- }
- }
- },
- "U4": {
- "pkg": {
- "0": {
- "torch": {
- "uint4": {
- "variant": "uint4"
- }
- }
- }
- }
- },
- "U5": {
- "pkg": {
- "0": {
- "torch": {
- "uint5": {
- "variant": "uint5"
- }
- }
- }
- }
- },
- "U6": {
- "pkg": {
- "0": {
- "torch": {
- "uint6": {
- "variant": "uint6"
- }
- }
- }
- }
- },
- "U7": {
- "pkg": {
- "0": {
- "torch": {
- "uint7": {
- "variant": "uint7"
- }
- }
- }
- }
- }
- },
- "ops.precision.int": {
- "I8": {
- "pkg": {
- "0": {
- "torch": {
- "int8": {
- "variant": "int8"
- }
- }
- }
- }
- },
- "I16": {
- "pkg": {
- "0": {
- "torch": {
- "int16": {
- "variant": "int16"
- }
- }
- }
- }
- },
- "I32": {
- "pkg": {
- "0": {
- "torch": {
- "int32": {
- "variant": "int32"
- }
- }
- }
- }
- },
- "I64": {
- "pkg": {
- "0": {
- "torch": {
- "int64": {
- "variant": "int64"
- }
- }
- }
- }
- },
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "qint8": {
- "variant": "qint8"
- }
- }
- }
- }
- },
- "Q32": {
- "pkg": {
- "0": {
- "torch": {
- "qint32": {
- "variant": "qint32"
- }
- }
- }
- }
- },
- "I1": {
- "pkg": {
- "0": {
- "torch": {
- "int1": {
- "variant": "int1"
- }
- }
- }
- }
- },
- "I2": {
- "pkg": {
- "0": {
- "torch": {
- "int2": {
- "variant": "int2"
- }
- }
- }
- }
- },
- "I3": {
- "pkg": {
- "0": {
- "torch": {
- "int3": {
- "variant": "int3"
- }
- }
- }
- }
- },
- "I4": {
- "pkg": {
- "0": {
- "torch": {
- "int4": {
- "variant": "int4"
- }
- }
- }
- }
- },
- "I5": {
- "pkg": {
- "0": {
- "torch": {
- "int5": {
- "variant": "int5"
- }
- }
- }
- }
- },
- "I6": {
- "pkg": {
- "0": {
- "torch": {
- "int6": {
- "variant": "int6"
- }
- }
- }
- }
- },
- "I7": {
- "pkg": {
- "0": {
- "torch": {
- "int7": {
- "variant": "int7"
- }
- }
- }
- }
- }
- },
- "ops.precision.float": {
- "F16": {
- "pkg": {
- "0": {
- "torch": {
- "float16": {
- "variant": "fp16"
- }
- }
- }
- }
- },
- "F32": {
- "pkg": {
- "0": {
- "torch": {
- "float32": {
- "variant": "fp32"
- }
- }
- }
- }
- },
- "F64": {
- "pkg": {
- "0": {
- "torch": {
- "float64": {
- "variant": "fp64"
- }
- }
- }
- }
- },
- "F8_E5M2": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2": {
- "variant": "fp8_e5m2"
- }
- }
- }
- }
- },
- "F8_E4M3": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fn": {
- "variant": "fp8_e4m3fn"
- }
- }
- }
- }
- },
- "F8_E5M2FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e5m2fnuz": {
- "variant": "fp8_e5m2fnuz"
- }
- }
- }
- }
- },
- "F8_E4M3FNUZ": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e4m3fnuz": {
- "variant": "fp8_e4m3fnuz"
- }
- }
- }
- }
- },
- "F8_E8M0FNU": {
- "pkg": {
- "0": {
- "torch": {
- "float8_e8m0fnu": {
- "variant": "fp8_e8m0fnu"
- }
- }
- }
- }
- },
- "F8_E2M1": {
- "pkg": {
- "0": {
- "torch": {
- "float4_e2m1fn_x2": {
- "variant": "fp4_e2m1fn_x2"
- }
- }
- }
- }
- }
- },
- "ops.precision.complex": {
- "C32": {
- "pkg": {
- "0": {
- "torch": {
- "complex32": {
- "variant": "complex32"
- }
- }
- }
- }
- },
- "C64": {
- "pkg": {
- "0": {
- "torch": {
- "complex64": {
- "variant": "complex64"
- }
- }
- }
- }
- },
- "C128": {
- "pkg": {
- "0": {
- "torch": {
- "complex128": {
- "variant": "complex128"
- }
- }
- }
- }
- }
- },
- "ops.precision.bool": {
- "Bbool": {
- "pkg": {
- "0": {
- "torch": {
- "bool": {
- "variant": "bool"
- }
- }
- }
- }
- }
- },
- "ops.precision.quint": {
- "Q8": {
- "pkg": {
- "0": {
- "torch": {
- "quint8": {
- "variant": "quint8"
- }
- }
- }
- }
- },
- "Q4x2": {
- "pkg": {
- "0": {
- "torch": {
- "quint4x2": {
- "variant": "quint4x2"
- }
- }
- }
- }
- },
- "Q2x4": {
- "pkg": {
- "0": {
- "torch": {
- "quint2x4": {
- "variant": "quint2x4"
- }
- }
- }
- }
- }
- },
- "ops.precision.bfloat": {
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bfloat16": {
- "variant": "bf16"
- }
- }
- }
- }
- }
- },
- "ops.precision.bits": {
- "B1x8": {
- "pkg": {
- "0": {
- "torch": {
- "bits1x8": {
- "variant": "bits1x8"
- }
- }
- }
- }
- },
- "B2x4": {
- "pkg": {
- "0": {
- "torch": {
- "bits2x4": {
- "variant": "bits2x4"
- }
- }
- }
- }
- },
- "B4x2": {
- "pkg": {
- "0": {
- "torch": {
- "bits4x2": {
- "variant": "bits4x2"
- }
- }
- }
- }
- },
- "B8": {
- "pkg": {
- "0": {
- "torch": {
- "bits8": {
- "variant": "bits8"
- }
- }
- }
- }
- },
- "B16": {
- "pkg": {
- "0": {
- "torch": {
- "bits16": {
- "variant": "bits16"
- }
- }
- }
- }
- }
- },
- "ops.scheduler.amused": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "AmusedScheduler",
- "module_path": "diffusers.schedulers.scheduling_amused"
- }
- }
- }
- },
- "ops.scheduler.cmstochasticiterative": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CMStochasticIterativeScheduler",
- "module_path": "diffusers.schedulers.scheduling_consistency_models"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.cogvideoxdpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "CogVideoXDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpm_cogvideox"
- }
- }
- }
- },
- "ops.scheduler.ddiminverse": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_inverse"
- }
- }
- }
- },
- "ops.scheduler.ddimparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddim": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDIMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddim"
- }
- }
- }
- },
- "ops.scheduler.ddpmparallel": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMParallelScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_parallel"
- }
- }
- }
- },
- "ops.scheduler.ddpm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm"
- }
- }
- }
- },
- "ops.scheduler.ddpmwuerstchen": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "DDPMWuerstchenScheduler",
- "module_path": "diffusers.schedulers.scheduling_ddpm_wuerstchen"
- }
- }
- }
- },
- "ops.scheduler.deis": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DEISMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_deis_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpminverse": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepInverseScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep_inverse"
- }
- }
- }
- },
- "ops.scheduler.dpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.dpmsinglestep": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "DPMSolverSinglestepScheduler",
- "module_path": "diffusers.schedulers.scheduling_dpmsolver_singlestep"
- }
- }
- }
- },
- "ops.scheduler.edmdpm": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "EDMDPMSolverMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_dpmsolver_multistep"
- }
- }
- }
- },
- "ops.scheduler.edmeuler": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "EDMEulerScheduler",
- "module_path": "diffusers.schedulers.scheduling_edm_euler"
- }
- }
- }
- },
- "ops.scheduler.eulerancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerAncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.euler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "EulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatcheuler": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchEulerDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_euler_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchheun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchHeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.flowmatchlcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "FlowMatchLCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_flow_match_lcm"
- }
- }
- }
- },
- "ops.scheduler.heun": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "HeunDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_heun_discrete"
- }
- }
- }
- },
- "ops.scheduler.ipndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "IPNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_ipndm"
- }
- }
- }
- },
- "ops.scheduler.karrasve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "KarrasVeScheduler",
- "module_path": "diffusers.schedulers.deprecated.scheduling_karras_ve"
- }
- }
- }
- },
- "ops.scheduler.kdpm2ancestral": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2AncestralDiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_ancestral_discrete"
- }
- }
- }
- },
- "ops.scheduler.kdpm2": {
- "discrete": {
- "pkg": {
- "0": {
- "diffusers": "KDPM2DiscreteScheduler",
- "module_path": "diffusers.schedulers.scheduling_k_dpm_2_discrete"
- }
- }
- }
- },
- "ops.scheduler.lcm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "LCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_lcm"
- }
- }
- }
- },
- "ops.scheduler.pndm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "PNDMScheduler",
- "module_path": "diffusers.schedulers.scheduling_pndm"
- }
- }
- }
- },
- "ops.scheduler.repaint": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "RePaintScheduler",
- "module_path": "diffusers.schedulers.scheduling_repaint"
- }
- }
- }
- },
- "ops.scheduler.sa": {
- "solver": {
- "pkg": {
- "0": {
- "diffusers": "SASolverScheduler",
- "module_path": "diffusers.schedulers.scheduling_sasolver"
- }
- }
- }
- },
- "ops.scheduler.scm": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "SCMScheduler",
- "module_path": "diffusers.schedulers.scheduling_scm"
- }
- }
- }
- },
- "ops.scheduler.scoresdeve": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "ScoreSdeVeScheduler",
- "module_path": "diffusers.schedulers.scheduling_sde_ve"
- }
- }
- }
- },
- "ops.scheduler.tcd": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "TCDScheduler",
- "module_path": "diffusers.schedulers.scheduling_tcd"
- }
- }
- }
- },
- "ops.scheduler.unclip": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "UnCLIPScheduler",
- "module_path": "diffusers.schedulers.scheduling_unclip"
- }
- }
- }
- },
- "ops.scheduler.unipc": {
- "multistep": {
- "pkg": {
- "0": {
- "diffusers": "UniPCMultistepScheduler",
- "module_path": "diffusers.schedulers.scheduling_unipc_multistep"
- }
- }
- }
- },
- "ops.scheduler.vqdiffusion": {
- "scheduler": {
- "pkg": {
- "0": {
- "diffusers": "VQDiffusionScheduler",
- "module_path": "diffusers.schedulers.scheduling_vq_diffusion"
- }
- }
- }
- },
- "ops.scheduler.karrasdiffusion": {
- "schedulers": {
- "pkg": {
- "0": {
- "diffusers": "KarrasDiffusionSchedulers",
- "module_path": "diffusers.schedulers.scheduling_utils"
- }
- }
- }
- },
- "info.lora.dmd": {
- "stable-diffusion-xl-1": {
- "repo": "tianweiy/DMD2",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "timesteps": [
- 999,
- 749,
- 499,
- 249
- ]
- },
- "scheduler": {
- "ops.scheduler.lcm": ""
- }
- }
- },
- "file_256": [
- "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
- "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb"
- ]
- }
- },
- "info.lora.dpo": {
- "stable-diffusion-xl-1": {
- "repo": "radames/sdxl-DPO-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5,
- "num_inference_steps": 4
- },
- "scheduler": {
- "ops.scheduler.dpm": {
- "algorithm_type": "sde-dpmsolver++",
- "use_karras_sigmas": true,
- "order": 2
- }
- }
- }
- },
- "file_256": [
- "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
- "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad"
- ]
- }
- },
- "info.lora.flash": {
- "stable-diffusion-xl-1": {
- "repo": "jasperai/flash-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": "ops.scheduler.lcm"
- }
- },
- "file_256": [
- "afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"
- ]
- },
- "pixart-alpha": {
- "repo": "jasperai/flash-pixart",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"
- ]
- },
- "stable-diffusion-3": {
- "repo": "jasperai/flash-sd3",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "jasperai/flash-sd",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- },
- "file_256": [
- "99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"
- ]
- }
- },
- "info.lora.hyper": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- }
- }
- },
- "file_256": {
- "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {
- "num_inference_steps": 12
- },
- "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {
- "num_inference_steps": 8
- },
- "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "flux1-dev": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- }
- }
- },
- "file_256": {
- "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {
- "num_inference_steps": 16
- },
- "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {
- "num_inference_steps": 4
- },
- "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {
- "num_inference_steps": 8
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "ByteDance/Hyper-SD",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {
- "num_inference_steps": 12
- },
- "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {
- "num_inference_steps": 8
- },
- "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {
- "num_inference_steps": 1,
- "guidance_scale": 0.0
- },
- "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- }
- },
- "info.lora.lcm": {
- "stable-diffusion-xl-1": {
- "repo": "latent-consistency/lcm-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"
- ]
- },
- "ssd": {
- "repo": "latent-consistency/lcm-lora-ssd-1b",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"
- ]
- },
- "segmind-vega": {
- "repo": "segmind/Segmind-VegaRT",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "gen_kwargs": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "latent-consistency/lcm-lora-sdv1-5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 8
- }
- }
- },
- "file_256": [
- "8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"
- ]
- }
- },
- "info.lora.lightning": {
- "stable-diffusion-xl-1": {
- "repo": "ByteDance/SDXL-Lightning",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0
- }
- }
- }
- }
- },
- "info.lora.pcm": {
- "stable-diffusion-xl-1": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {
- "num_inference_steps": 16
- },
- "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {
- "num_inference_steps": 16
- },
- "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {
- "num_inference_steps": 4
- },
- "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {
- "num_inference_steps": 4
- },
- "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {
- "num_inference_steps": 8
- },
- "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {
- "num_inference_steps": 8
- },
- "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- },
- "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-v1-5": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {
- "num_inference_steps": 16
- },
- "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {
- "num_inference_steps": 4
- },
- "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {
- "num_inference_steps": 8
- },
- "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {
- "num_inference_steps": 16,
- "guidance_scale": 0.0
- },
- "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {
- "num_inference_steps": 2,
- "guidance_scale": 0.0
- },
- "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {
- "num_inference_steps": 4,
- "guidance_scale": 0.0
- },
- "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {
- "num_inference_steps": 8,
- "guidance_scale": 0.0
- }
- }
- },
- "stable-diffusion-3": {
- "repo": "wangfuyun/PCM_Weights",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": {
- "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {
- "num_inference_steps": 2,
- "guidance_scale": 1.2
- },
- "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- },
- "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {
- "num_inference_steps": 4,
- "guidance_scale": 1.2
- }
- }
- }
- },
- "info.lora.slam": {
- "stable-diffusion-xl-1": {
- "repo": "alimama-creative/slam-lora-sdxl",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "scheduler": {
- "ops.scheduler.lcm": {
- "timestep_spacing": "trailing"
- }
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 1
- }
- }
- },
- "file_256": [
- "22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "alimama-creative/slam-sd1.5",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- }
- }
- },
- "info.lora.spo": {
- "stable-diffusion-xl-1": {
- "repo": "SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 5.0
- }
- }
- },
- "file_256": [
- "0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "guidance_scale": 7.5
- }
- }
- },
- "file_256": [
- "1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"
- ]
- }
- },
- "info.lora.tcd": {
- "stable-diffusion-xl-1": {
- "repo": "h1t/TCD-SDXL-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- },
- "generation": {
- "num_inference_steps": 4,
- "guidance_scale": 0,
- "eta": 0.3
- },
- "scheduler": {
- "ops.scheduler.tcd": {}
- }
- }
- },
- "file_256": [
- "2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "h1t/TCD-SD15-LoRA",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {}
- }
- }
- },
- "file_256": [
- "eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"
- ],
- "layer_256": [
- "e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"
- ],
- "layer_b3": [
- "90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"
- ]
- }
- },
- "info.lora.turbo": {
- "stable-diffusion-xl-1": {
- "file_256": [
- "a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"
- ]
- },
- "flux1-dev": {
- "repo": "alimama-creative/FLUX.1-Turbo-Alpha",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 0.125
- }
- },
- "generation": {
- "guidance_scale": 3.5,
- "num_inference_steps": 8,
- "max_sequence_length": 512
- }
- }
- },
- "file_256": [
- "77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"
- ]
- },
- "stable-diffusion-3": {
- "repo": "tensorart/stable-diffusion-3.5-large-TurboX",
- "pkg": {
- "0": {
- "diffusers": {
- "load_lora_weights": {
- "fuse": 1.0
- }
- },
- "scheduler": {
- "ops.scheduler.flow-match": {
- "shift": 5
- }
- }
- }
- },
- "file_256": {
- "fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {
- "num_inference_steps": "8"
- }
- }
- }
- },
- "info.art.audiogen": {
- "*": {
- "repo": "facebook/audiogen-medium",
- "pkg": {
- "0": {
- "audiocraft": "models.AudioGen",
- "generation": {
- "duration": 5
- },
- "stage_2": {
- "audiocraft": ".data.audioaudio_write",
- "generation": {
- "strategy": "loudness",
- "loudness_compressor": true
- }
- }
- }
- }
- }
- },
- "info.art.parler-tts-v1": {
- "*": {
- "repo": "parler-tts/parler-tts-large-v1",
- "pkg": {
- "0": {
- "parler_tts": "ParlerTTSForConditionalGeneration",
- "generation": {
- "return_tensors": "pt"
- }
- }
- }
- }
- },
- "info.gan.snac-st": {
- "*": {
- "repo": "Zuellni/snac-24khz-ST",
- "pkg": {
- "0": {
- "snac": "SNAC"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio"
- }
- },
- "file_256": [
- "e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be",
- "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"
- ],
- "layer_256": [
- "35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300",
- "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"
- ],
- "layer_b3": [
- "18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46",
- "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"
- ]
- }
- },
- "info.gan.kokoro": {
- "*": {
- "repo": "hexgrad/Kokoro-82M",
- "pkg": {
- "0": {
- "kokoro": "KPipeline"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
- "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4"
- ],
- "layer_256": [
- "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
- "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b"
- ],
- "layer_b3": [
- "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
- "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d",
- "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e932dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92"
- ]
- }
- },
- "info.stst.silero-vad": {
- "*": {
- "repo": "freddyaboulton/silero-vad",
- "pkg": {
- "0": {
- "onnx": "onnx"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- },
- "file_256": [
- "591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"
- ],
- "layer_256": [
- "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd"
- ],
- "layer_b3": [
- "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
- "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
- "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434",
- "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e"
- ]
- }
- },
- "info.stst.wav2vec2-conformer-rope-960h-ft": {
- "*": {
- "repo": "facebook/wav2vec2-conformer-rope-large-960h-ft",
- "pkg": {
- "0": {
- "transformers": "Wav2Vec2ConformerForCTC"
- }
- },
- "file_256": [
- "97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"
- ],
- "layer_256": [
- "1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"
- ],
- "layer_b3": [
- "6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"
- ],
- "tasks": [
- "Wav2Vec2ConformerForAudioFrameClassification",
- "Wav2Vec2ConformerForCTC",
- "Wav2Vec2ConformerForPreTraining",
- "Wav2Vec2ConformerForSequenceClassification",
- "Wav2Vec2ConformerForXVector",
- "Wav2Vec2ConformerModel",
- "Wav2Vec2ConformerPreTrainedModel"
- ]
- }
- },
- "info.art.orpheus-0-ft": {
- "*": {
- "repo": "canopylabs/orpheus-3b-0.1-ft",
- "pkg": {
- "0": {
- "orpheus_tts": "OrpheusModel",
- "generation": {
- "max_model_len": 2048
- }
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.art.outetts-0": {
- "*": {
- "repo": "OuteAI/OuteTTS-0.3-1B",
- "pkg": {
- "0": {
- "outetts": "InterfaceHF"
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {
- "audio_format": "wav",
- "join_audio": true,
- "verbose": false
- }
- }
- }
- }
- },
- "info.gan.speecht5-hifigan": {
- "*": {
- "file_256": [
- "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c"
- ],
- "layer_256": [
- "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770"
- ],
- "layer_b3": [
- "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956"
- ]
- }
- },
- "info.dit.flux1-dev": {
- "mystic": {
- "repo": "enhanceaiteam/Mystic",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 16,
- "guidance_scale": 7.5,
- "width": 768,
- "height": 1024
- }
- }
- },
- "file_256": [
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9",
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac"
- ],
- "layer_256": [
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e"
- ],
- "layer_b3": [
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68"
- ]
- },
- "flux1-lite": {
- "repo": "freepik/flux.1-lite-8b",
- "pkg": {
- "0": {
- "generation": {
- "num_inference_steps": 28
- }
- }
- },
- "file_256": [
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec",
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747"
- ],
- "layer_256": [
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee"
- ],
- "layer_b3": [
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf"
- ]
- },
- "f-lite": {
- "repo": "freepik/f-lite",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "f-lite-texture": {
- "repo": "freepik/f-lite-texture",
- "pkg": {
- "0": {
- "f_lite": "FLitePipeline",
- "generation": {
- "num_inference_steps": 28
- }
- }
- }
- },
- "flux": {
- "repo": "TencentARC/flux-mini",
- "file_256": [
- "4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"
- ],
- "layer_256": [
- "e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"
- ],
- "layer_b3": [
- "c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"
- ]
- },
- "flex2": {
- "repo": "ostris/Flex.2-preview",
- "file_256": [
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615",
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02"
- ],
- "layer_256": [
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83"
- ],
- "layer_b3": [
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076"
- ]
- },
- "flex1-alpha": {
- "repo": "ostris/Flex.1-alpha",
- "file_256": [
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f",
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee"
- ],
- "layer_256": [
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519"
- ],
- "layer_b3": [
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6"
- ]
- },
- "*": {
- "pkg": {
- "0": {
- "precision": "ops.precision.bfloat.B16",
- "generation": {
- "height": 1024,
- "width": 1024,
- "guidance_scale": 3.5,
- "num_inference_steps": 50,
- "max_sequence_length": 512
- }
- },
- "1": {
- "mflux": "flux.flux.Flux1",
- "generation": {
- "height": 1024,
- "width": 1024,
- "gudance": 3.5,
- "num_inference_steps": 25
- }
- }
- },
- "file_256": [
- "f6315581b7cddd450b9aba72b4e9ccf8b6580dc1a6b9538aff43ee26a1a3b6c2",
- "1b2170ac37156d4cf91909eb6834bb8adac84bc1fce8098a29cfb03738df84ad",
- "4610115bb0c89560703c892c59ac2742fa821e60ef5871b33493ba544683abd7",
- "d86a3038eacaa720682cb9b1da3c49fecf8a3ded605af4def6061eaa18903eb8",
- "b7d840eef01c27dfd72ae9143c261355a51bab3b2662263a6cb0059d55347c3d"
- ],
- "layer_b3": [
- "261559c8eaccae558f72621804a9ee188d338e45e2c622a58db709ac190198ba",
- "87f5d565c66e40eb02eb96498243ad81afcbf86192db99a4fc8fff215470320e",
- "e61d10a394902dadca9367467b2245070f651f4553ec4a96192fbba64e820acb"
- ],
- "layer_256": [
- "3db58cf834d2f81abb1e035131956da4c90451074c681d0db10810e55e60c2c4",
- "ddf1a34a06b355ce2bcd0f9beb0713450d9bcdc61a03a6bc37716361735e96f1",
- "ad8763121f98e28bc4a3d5a8b494c1e8f385f14abe92fc0ca5e4ab3191f3a881"
- ],
- "identifiers": [
- "double_blocks.12.txt_mod.lin.weight",
- "add_q_proj.weight",
- "single_transformer_blocks.9.norm.linear.weight"
- ],
- "tasks": [
- "Image",
- "Redux",
- "Kontext",
- "Depth",
- "Fill",
- "ConceptAttention",
- "ControlNet",
- "CavTon",
- "IC-Edit"
- ]
- }
- },
- "info.dit.wan2-flf2v-720p": {
- "diffusers": {
- "repo": "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers",
- "file_256": [
- "",
- ""
- ],
- "layer_256": [
- ""
- ],
- "layer_b3": [
- ""
- ]
- }
- },
- "ops.patch.hidiffusion": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "hidiffusion": {
- "apply_hidiffusion": {
- "timesteps": "StableDiffusionXLTimesteps"
- }
- },
- "generation": {
- "height": 2048,
- "width": 2048,
- "eta": 1.0,
- "guidance_scale": 7.5,
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "ops.scheduler.align-your-steps": {
- "stable-diffusion-xl-1": {
- "pkg": {
- "0": {
- "diffusers": "schedulers.scheduling_utils.AysSchedules",
- "generation": {
- "timesteps": "StableDiffusionXLTimesteps",
- "num_inference_steps": 10
- }
- }
- }
- }
- },
- "info.art.chameleon": {
- "lumina-mgpt-1024": {
- "repo": "Alpha-VLLM/Lumina-mGPT-7B-1024",
- "pkg": {
- "0": {
- "inference_solver": {
- "FlexARInferenceSolver": {
- "precision": "bf16",
- "target_size": 768
- }
- },
- "generation": {
- "images": [],
- "qas": [
- [
- "q1",
- null
- ]
- ],
- "max_gen_len": 8192,
- "temperature": 1.0
- }
- },
- "1": {
- "inference_solver": "ChameleonXLLMXForConditionalGeneration"
- }
- },
- "file_256": [
- "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
- "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec",
- "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5",
- "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b"
- ],
- "layer_256": [
- "eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"
- ],
- "layer_b3": [
- "6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"
- ]
- }
- },
- "info.vit.clip-vit-patch14": {
- "*": {
- "repo": "openai/clip-vit-large-patch14",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModel"
- }
- },
- "file_256": [
- "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e",
- "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17",
- "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082",
- "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3",
- "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd",
- "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1",
- "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc",
- "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae",
- "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325",
- "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5",
- "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447",
- "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9",
- "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334",
- "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e",
- "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633",
- "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
- "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1"
- ],
- "layer_256": [
- "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
- "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
- "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
- "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
- "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
- "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
- "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
- "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
- "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
- "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471",
- "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364",
- "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e",
- "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3"
- ],
- "layer_b3": [
- "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
- "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
- "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
- "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
- "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
- "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
- "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636",
- "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f",
- "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3",
- "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5",
- "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc",
- "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc",
- "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f",
- "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980",
- "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40",
- "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-g-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-g-14-laion2B-s12B-b42K",
- "pkg": {
- "0": {
- "transformers": "CLIPTextModelWithProjection"
- }
- },
- "file_256": [
- "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134",
- "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4",
- "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4",
- "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe",
- "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d",
- "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5",
- "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c",
- "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a",
- "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff",
- "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b"
- ],
- "layer_256": [
- "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
- "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
- "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc",
- "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3",
- "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946",
- "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a",
- "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46"
- ],
- "layer_b3": [
- "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
- "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
- "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8",
- "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2",
- "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5",
- "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e",
- "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.vit.clip-vit-h-14-laion-s-b": {
- "*": {
- "repo": "laion/CLIP-ViT-H-14-laion2B-s32B-b79K",
- "pkg": {
- "0": {
- "transformers": "CLIPModel"
- }
- },
- "file_256": [
- "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
- "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5",
- "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161"
- ],
- "layer_256": [
- "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
- "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061",
- "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c"
- ],
- "layer_b3": [
- "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
- "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05",
- "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4"
- ],
- "tasks": [
- "CLIPModel",
- "CLIPPreTrainedModel",
- "CLIPTextModel",
- "CLIPTextModelWithProjection",
- "CLIPVisionModel",
- "CLIPVisionModelWithProjection",
- "CLIPForImageClassification"
- ]
- }
- },
- "info.aet.chatglm3": {
- "*": {
- "repo": "zai-org/chatglm3-6b",
- "pkg": {
- "0": {
- "transformers": "AutoModel"
- }
- },
- "file_256": [
- "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654",
- "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d"
- ],
- "layer_256": [
- "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44"
- ],
- "layer_b3": [
- "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481"
- ]
- }
- },
- "info.art.qwen2": {
- "bagel-mot": {
- "repo": "ByteDance-Seed/BAGEL-7B-MoT",
- "pkg": {
- "0": {
- "Bagel": "app"
- }
- }
- }
- },
- "info.vae.tae": {
- "stable-diffusion-3": {
- "repo": "madebyollin/taesd3",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"
- ]
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/taesdxl",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"
- ]
- },
- "stable-diffusion-v1-5": {
- "repo": "madebyollin/taesd",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"
- ]
- },
- "flux1-dev": {
- "repo": "madebyollin/taef1",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderTiny"
- }
- },
- "file_256": [
- "927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"
- ]
- }
- },
- "info.vae.kl": {
- "qwen-image": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLQwenImage"
- }
- },
- "file_256": [
- "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344"
- ],
- "layer_256": [
- "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc"
- ],
- "layer_b3": [
- "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b"
- ]
- },
- "ltx-video": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLLTXVideo"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "allegro": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLAllegro"
- }
- },
- "file_256": [
- "47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"
- ],
- "layer_256": [
- "bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"
- ],
- "layer_b3": [
- "93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"
- ]
- },
- "cosmos-1-diffusion-video2world": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCosmos"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "easyanimatev5-zh": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMagvit"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "hunyuanvideo-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLHunyuanVideo"
- }
- },
- "file_256": [
- "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818",
- "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
- "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
- "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0"
- ]
- },
- "mochi-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLMochi"
- }
- },
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "audioldm-s-v2": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"
- ],
- "layer_256": [
- "54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"
- ],
- "layer_b3": [
- "00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"
- ]
- },
- "stable-video-diffusion-img2vid-xt": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLTemporalDecoder"
- }
- }
- },
- "stable-diffusion-xl-1": {
- "repo": "madebyollin/sdxl-vae-fp16-fix",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c",
- "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646",
- "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48",
- "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68",
- "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341",
- "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4",
- "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74",
- "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0"
- ]
- },
- "stable-diffusion-xl-1*": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1",
- "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a",
- "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1",
- "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff"
- ],
- "layer_256": [
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da",
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0"
- ],
- "layer_b3": [
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74"
- ]
- },
- "shuttle-jaguar": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904"
- ],
- "layer_256": [
- "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b"
- ],
- "layer_b3": [
- "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
- "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0"
- ]
- },
- "flux1-dev": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38",
- "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3",
- "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94",
- "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc"
- ],
- "layer_256": [
- "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
- "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216",
- "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004",
- "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80"
- ],
- "layer_b3": [
- "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17",
- "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903"
- ]
- },
- "musicldm": {
- "file_256": [
- "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4"
- ],
- "layer_256": [
- "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f"
- ],
- "layer_b3": [
- "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0"
- ]
- },
- "stable-diffusion-v1-5": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- },
- "file_256": [
- "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520",
- "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a",
- "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46",
- "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab",
- "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75",
- "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815",
- "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c",
- "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c"
- ],
- "layer_256": [
- "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd",
- "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3"
- ],
- "layer_b3": [
- "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e",
- "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833"
- ]
- }
- },
- "info.vae.wan": {
- "wan2-i2v-480p": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLWan",
- "precision": "ops.precision.float.F32"
- }
- },
- "file_256": [
- "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793",
- "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b"
- ],
- "layer_256": [
- "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
- "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e"
- ],
- "layer_b3": [
- "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010",
- "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433"
- ]
- },
- "skyreels-v2-t2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "skyreels-v2-i2v-720p": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.cogvideox": {
- "cogvideox-i2v": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKLCogVideoX"
- }
- },
- "file_256": [
- "a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"
- ],
- "layer_256": [
- "43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"
- ],
- "layer_b3": [
- "246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"
- ]
- },
- "cogvideox-fun-v-pose": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- },
- "consisid": {
- "file_256": [],
- "layer_256": [],
- "layer_b3": []
- }
- },
- "info.vae.dc": {
- "sana-1024px-bf16": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderDC"
- }
- },
- "file_256": [
- "15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"
- ],
- "layer_256": [
- "abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"
- ],
- "layer_b3": [
- "cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"
- ]
- }
- },
- "info.vae.oobleck": {
- "stable-audio-open-1": {
- "pkg": {
- "0": {
- "diffusers": "AutoencoderOobleck"
- }
- }
- }
- },
- "info.vae.eq": {
- "stable-diffusion-xl-1": {
- "repo": "KBlueLeaf/EQ-SDXL-VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- },
- "info.vae.ms-lc-eq": {
- "stable-diffusion-xl-1": {
- "repo": "Anzhc/MS-LC-EQ-D-VR_VAE",
- "pkg": {
- "0": {
- "diffusers": "AutoencoderKL"
- }
- }
- }
- }
-}
\ No newline at end of file
diff --git a/mir/__init__.py b/mir/__init__.py
index c2ad045..1922713 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -10,6 +10,7 @@
ROOT_PATH = os.path.dirname(__file__)
MIR_PATH_NAMED = os.path.join(ROOT_PATH, "mir.json")
+
BREAKING = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["breaking"]
SEARCH = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["search"]
PARAMETERS = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["parameters"]
diff --git a/data/__init__.py b/mir/data/__init__.py
similarity index 90%
rename from data/__init__.py
rename to mir/data/__init__.py
index c766341..a8f596e 100644
--- a/data/__init__.py
+++ b/mir/data/__init__.py
@@ -14,6 +14,6 @@
MIGRATIONS = read_json_file(os.path.join(ROOT_PATH, "data", "migrations.json"))
NN_FILTER = read_json_file(os.path.join(ROOT_PATH, "data", "nn_filter.json"))
PARAMETERS = read_json_file(os.path.join(ROOT_PATH, "data", "parameters.json"))
-PREFIXES = read_json_file(os.path.join(ROOT_PATH, "data", "prefixes.json"))
+PIPE_MARKERS = read_json_file(os.path.join(ROOT_PATH, "data", "pipe_markers.json"))
TAG_SCRAPE = read_json_file(os.path.join(ROOT_PATH, "data", "tag_scrape.json"))
TRANSFORMERS_ADDS = read_json_file(os.path.join(ROOT_PATH, "data", "transformers_adds.json"))
diff --git a/data/diffusers_adds.json b/mir/data/diffusers_adds.json
similarity index 99%
rename from data/diffusers_adds.json
rename to mir/data/diffusers_adds.json
index 6f39afd..1de001e 100644
--- a/data/diffusers_adds.json
+++ b/mir/data/diffusers_adds.json
@@ -361,7 +361,7 @@
"generation": {
"height": 1024,
"width": 1024,
- "gudance": 3.5,
+ "guidance": 3.5,
"num_inference_steps": 25
}
}
diff --git a/data/exclusions.json b/mir/data/exclusions.json
similarity index 100%
rename from data/exclusions.json
rename to mir/data/exclusions.json
diff --git a/data/migrations.json b/mir/data/migrations.json
similarity index 68%
rename from data/migrations.json
rename to mir/data/migrations.json
index 8f696e7..5bc9929 100644
--- a/data/migrations.json
+++ b/mir/data/migrations.json
@@ -28,23 +28,23 @@
"THUDM/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
"zai-org/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air"
},
- "model": {
- "bark": "suno/bark",
- "aria_text": "rhymes-ai/Aria-Chat",
- "cwm": "facebook/cwm",
- "decision_transformer": "edbeeching/decision-transformer-gym-hopper-medium",
- "distilbert": "distilbert-base-uncased",
- "gpt_bigcode": "bigcode/gpt_bigcode-santacoder",
- "granite": "ibm-granite/granite-3.3-2b-base",
- "granitemoe": "ibm-research/PowerMoE-3b",
- "granitemoehybrid": "ibm-granite/granite-4.0-h-small",
- "musicgen": "facebook/musicgen-small",
- "seamless_m4t_v2": "facebook/seamless-m4t-v2-large",
- "timm_backbone": "microsoft/resnet-50",
- "gpt_oss": "openai/gpt-oss-120b",
- "bert": "google-bert/bert-base-uncased",
- "timm_wrapper": "timm/resnet18.a1_in1k",
- "vision-text-dual-encoder": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
+ "config": {
+ "BarkConfig": "suno/bark",
+ "AriaTextConfig": "rhymes-ai/Aria-Chat",
+ "CwmConfig": "facebook/cwm",
+ "DecisionTransformerConfig": "edbeeching/decision-transformer-gym-hopper-medium",
+ "DistilBertConfig": "distilbert-base-uncased",
+ "GPTBigCodeConfig": "bigcode/gpt_bigcode-santacoder",
+ "GraniteConfig": "ibm-granite/granite-3.3-2b-base",
+ "GraniteMoeConfig": "ibm-research/PowerMoE-3b",
+ "GraniteMoeHybridConfig": "ibm-granite/granite-4.0-h-small",
+ "MusicgenConfig": "facebook/musicgen-small",
+ "SeamlessM4Tv2Config": "facebook/seamless-m4t-v2-large",
+ "TimmBackboneConfig": "microsoft/resnet-50",
+ "GptOssConfig": "openai/gpt-oss-120b",
+ "BertConfig": "google-bert/bert-base-uncased",
+ "TimmWrapperConfig": "timm/resnet18.a1_in1k",
+ "VisionTextDualEncoderConfig": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
},
"module": {
"blip_diffusion": "blip_diffusion",
diff --git a/data/nn_filter.json b/mir/data/nn_filter.json
similarity index 100%
rename from data/nn_filter.json
rename to mir/data/nn_filter.json
diff --git a/data/parameters.json b/mir/data/parameters.json
similarity index 74%
rename from data/parameters.json
rename to mir/data/parameters.json
index 5a3f650..18e927c 100644
--- a/data/parameters.json
+++ b/mir/data/parameters.json
@@ -1,10 +1,10 @@
{
- "bark": {
+ "BarkConfig": {
"n_head": [
""
]
},
- "aria_text": {
+ "AriaTextConfig": {
"vision_config": [
""
],
@@ -12,17 +12,17 @@
""
]
},
- "cwm": {
+ "CwmConfig": {
"n_head": [
""
]
},
- "bert": {
+ "BertConfig": {
"act_dropout": [
""
]
},
- "timm_wrapper": {
+ "TimmWrapperConfig": {
"_resnet_": [
""
]
diff --git a/data/prefixes.json b/mir/data/pipe_markers.json
similarity index 100%
rename from data/prefixes.json
rename to mir/data/pipe_markers.json
diff --git a/data/tag_scrape.json b/mir/data/tag_scrape.json
similarity index 100%
rename from data/tag_scrape.json
rename to mir/data/tag_scrape.json
diff --git a/data/transformers_adds.json b/mir/data/transformers_adds.json
similarity index 100%
rename from data/transformers_adds.json
rename to mir/data/transformers_adds.json
diff --git a/mir/generate/.notes.txt b/mir/generate/.notes.txt
deleted file mode 100644
index e133139..0000000
--- a/mir/generate/.notes.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-# type: ignore
-# ruff: noqa
-
-tag_model_from_repo
-
-mir_tag_from_config
-import_submodules
-
-
-constants
-tag_scheduler
-read_json_file
-mir_prefix_from_forward_pass
-
-Set Data Format
-Find classes
-get_repo_from_class_map
-check repo/model migration
-
-transformers_index
- classmapentry
- + find_transformers_classes
- +check_migrations
- get_repo_from_class_map
- mir_tag_from_config
- check_migrations
- import_submodules tokenizers
-
-
-diffusers_index
- docstringentry
- find_diffusers_classes
- check_migrations
- retrieve_diffusers_docstrings
- import_submodules module for model class
- import_submodules model class
- extract_init_parameters
- create_pipe_entry
- extract_init_parameters
- mir_prefix_from_forward_pass
- tag_model_from_repo
- check_migrations
-
-add_mir_dtype
- + tag_dtype
- MIRDatabase
-
-add_mir_schedulers
- tag_scheduler
-
-
-task_analysis
- import_submodules
- mapped_cls
- import_submodules
- tag_scheduler
- resolve_code_names
-
-
-# def create_model_tag(model_header,metadata_dict):
-# parse_file = parse_model_header(model_header)
-# reconstructed_file_path = os.path.join(disk_path,each_file)
-# attribute_dict = metadata_dict | {"disk_path": reconstructed_file_path}
-# file_metadata = parse_file | attribute_dict
-# index_tag = create_model_tag(file_metadata)
-#
\ No newline at end of file
diff --git a/mir/generate/__main__.py b/mir/generate/__main__.py
index 8a1e85b..2255ae6 100644
--- a/mir/generate/__main__.py
+++ b/mir/generate/__main__.py
@@ -274,3 +274,121 @@ def pipe(mir_db: MIRDatabase = None):
if __name__ == "__main__":
pipe()
+
+
+def main(mir_db: Callable | None = None, remake: bool = True) -> None:
+ """Build the database"""
+ from sys import modules as sys_modules
+
+ if __name__ != "__main__" and "pytest" not in sys_modules: #
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
+ usage="mir-maid",
+ epilog="""Does NOT include results of `mir-task` and `mir-pipe`. These commands should be run separately. Output:
+ 2025-08-03 14:22:47 INFO ('Wrote 0 lines to MIR database file.',)
+ 2025-08-03 14:22:47 INFO ('Wrote #### lines to MIR database file.',)""",
+ )
+ parser.add_argument(
+ "-r",
+ "--remake_off",
+ action="store_true",
+ default=False,
+ help="Prevent erasing and remaking the MIR database file (default: False, always start from a completely empty MIR file)",
+ )
+
+ args = parser.parse_args()
+ remake = not args.remake_off
+
+ from mir.automata import (
+ add_mir_audio,
+ add_mir_diffusion,
+ add_mir_dtype,
+ add_mir_llm,
+ add_mir_lora,
+ add_mir_schedulers,
+ add_mir_vae,
+ hf_pkg_to_mir,
+ mir_update,
+ )
+ from mir.config.json_io import write_json_file
+
+    folder_path_named = os.path.dirname(MIR_PATH_NAMED)
+    if remake:
+        os.remove(MIR_PATH_NAMED)
+        mode = "x"
+    else:
+        mode = "w"
+ write_json_file(folder_path_named, file_name="mir.json", data={"expected": "data"}, mode=mode)
+ mir_db = MIRDatabase()
+ mir_db.database.pop("expected", {})
+ hf_pkg_to_mir(mir_db)
+ add_mir_dtype(mir_db)
+ add_mir_schedulers(mir_db)
+ add_mir_lora(mir_db)
+ add_mir_audio(mir_db)
+ add_mir_diffusion(mir_db)
+ add_mir_llm(mir_db)
+ add_mir_vae(mir_db)
+ mir_db.write_to_disk()
+    # fresh instance so the update pass re-reads the database written above
+    mir_db = MIRDatabase()
+ mir_update(mir_db)
+ mir_db.write_to_disk()
+
+
+if __name__ == "__main__":
+ remake: bool = True
+ tasks = True
+ pipes = True
+
+ from sys import modules as sys_modules
+
+ if "pytest" not in sys_modules: #
+ import argparse
+
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.RawTextHelpFormatter,
+ description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
+ usage="python -m nnll.mir.maid",
+ epilog="""Includes `mir-task` and `mir-pipe` by default. Output:
+ 2025-08-15 19:41:18 INFO ('Wrote 0 lines to MIR database file.',)
+ 2025-08-15 19:38:48 INFO ('Wrote ### lines to MIR database file.',)
+ INFO ('Wrote ### lines to MIR database file.',)
+ INFO ('Wrote ### lines to MIR database file.',)""",
+ )
+ parser.add_argument(
+ "-r",
+ "--remake_off",
+ action="store_true",
+ default=False,
+ help="Don't erase and remake the MIR database (default: False)",
+ )
+ parser.add_argument(
+ "-t",
+ "--tasks_off",
+ action="store_true",
+ default=False,
+ help="Don't append task information to the MIR database (default: False)",
+ )
+ parser.add_argument(
+ "-p",
+ "--pipes_off",
+ action="store_true",
+ default=False,
+ help="Don't append pipeline information to the MIR database (default: False)",
+ )
+
+ args = parser.parse_args()
+ remake = not args.remake_off
+ tasks = not args.tasks_off
+ pipes = not args.pipes_off
+
+ main(remake=remake)
+    update_mir()  # NOTE(review): likely a typo for mir_update (the name imported inside main) -- confirm it is defined at module scope
+ from mir.inspect.tasks import pipe, run_task
+
+ mir_db = run_task()
+ pipe(mir_db)
diff --git a/mir/generate/diffusers/doc_parse.py b/mir/generate/diffusers/doc_parse.py
index 67c3103..18e091b 100644
--- a/mir/generate/diffusers/doc_parse.py
+++ b/mir/generate/diffusers/doc_parse.py
@@ -6,7 +6,7 @@
from pydantic import BaseModel, field_validator
from mir import NFO
from mir.generate.diffusers import DocParseData
-from mir.data import PREFIXES
+from mir.data import PIPE_MARKERS
class DocStringValidator:
@@ -58,34 +58,35 @@ def normalize_doc(cls, docs: str) -> str:
def doc_match(self, prefix_set: List[str] | None = None):
if prefix_set is None:
- prefix_set = PREFIXES["pipe_prefixes"]
+            prefix_set = PIPE_MARKERS["pipe_prefixes"]
candidate = None
staged = None
+ prior_candidate = ""
for prefix in prefix_set:
candidate = self.doc_string.partition(prefix)[2]
prior_candidate = self.doc_string.partition(prefix)[0]
if candidate:
- staged = candidate if any(call_type in candidate for call_type in PREFIXES["staged_call_types"]) else None
+            staged = candidate if any(call_method in candidate for call_method in PIPE_MARKERS["staged_call_types"]) else None
break
return candidate, prior_candidate, staged
def parse(self) -> DocParseData | None:
- candidate, prior_candidate, staged = self.doc_match(PREFIXES["pipe_prefixes"])
+ candidate, prior_candidate, staged = self.doc_match(PIPE_MARKERS["pipe_prefixes"])
if candidate:
pipe_class, pipe_repo = self._extract_class_and_repo(
segment=candidate,
- call_types=PREFIXES["call_types"],
+ call_methods=PIPE_MARKERS["call_types"],
prior_text=prior_candidate,
)
motion_adapter = "motion_adapter" in candidate or "adapter" in candidate
if motion_adapter and pipe_repo:
- staged, prior_candidate, _ = self.doc_match(PREFIXES["pipe_prefixes"][2:]) # skip the adapter statements
+ staged, prior_candidate, _ = self.doc_match(PIPE_MARKERS["pipe_prefixes"][2:]) # skip the adapter statements
staged_class, staged_repo = (
self._extract_class_and_repo(
segment=staged,
- call_types=PREFIXES["staged_call_types"] if not motion_adapter else PREFIXES["call_types"],
+ call_methods=PIPE_MARKERS["staged_call_types"] if not motion_adapter else PIPE_MARKERS["call_types"],
prior_text=prior_candidate,
prior_class=pipe_class,
)
@@ -104,23 +105,23 @@ def parse(self) -> DocParseData | None:
def _extract_class_and_repo(
self,
segment: str,
- call_types: List[str],
+ call_methods: List[str],
prior_text: str,
prior_class: Optional[str] = None,
) -> Tuple[Optional[str], Optional[str]]:
pipe_class = None
pipe_repo = None
- for call_type in call_types:
- if call_type in segment:
- pipe_class = segment.partition(call_type)[0].strip().split("= ")[-1].split(".")[-1]
- if prior_class == pipe_class and prior_text.split(call_type)[-1].strip().replace(")", ""):
- pipe_class = prior_text.partition(call_type)[0].strip().split("= ")[-1]
- repo_segment = segment.partition(call_type)[2].partition(")")[0]
+ for method_name in call_methods:
+ if method_name in segment:
+ pipe_class = segment.partition(method_name)[0].strip().split("= ")[-1].split(".")[-1]
+ if prior_class == pipe_class and prior_text.split(method_name)[-1].strip().replace(")", ""):
+ pipe_class = prior_text.partition(method_name)[0].strip().split("= ")[-1]
+ repo_segment = segment.partition(method_name)[2].partition(")")[0]
else:
- repo_segment = segment.partition(call_type)[2].partition(")")[0]
+ repo_segment = segment.partition(method_name)[2].partition(")")[0]
pipe_repo = repo_segment.replace("...", "").partition('",')[0].strip('" ')
if not DocStringValidator.is_valid_repo_path(pipe_repo):
- for reference in PREFIXES["repo_variables"]:
+ for reference in PIPE_MARKERS["repo_variables"]:
if reference in segment:
pipe_repo = self._resolve_variable(reference, prior_text)
break # Not empty!! 确保解析的路径不是空的!!
diff --git a/mir/generate/from_module.py b/mir/generate/from_module.py
index c85ec70..bbc6288 100644
--- a/mir/generate/from_module.py
+++ b/mir/generate/from_module.py
@@ -8,11 +8,6 @@
from importlib import import_module
from typing import Callable, Type
-from mir import NFO
-from mir.generate import REGEX
-from mir.generate.diffusers import IMPORT_STRUCTURE
-from mir.generate.transformers import MODEL_MAPPING_NAMES
-
def import_object_named(module: str, pkg_name_or_abs_path: str) -> Callable | None:
"""Convert two strings into a callable function or property\n
@@ -20,6 +15,7 @@ def import_object_named(module: str, pkg_name_or_abs_path: str) -> Callable | No
:param library_path: Base package for the module
:return: The callable attribute or property
"""
+ from mir import NFO
module_normalized: str = module.strip()
library = pkg_name_or_abs_path.strip()
@@ -89,22 +85,24 @@ def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
return import_path
-def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
- """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
- :param class_name: To return only one class, defaults to None
- :param pkg_name: optional field for library, defaults to "transformers"
- :param path_format: Retrieve just the code name, or the full module path and code name within the package
- :return: A list of all code names, or the one corresponding to the provided class"""
+# def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
+# """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
+# :param class_name: To return only one class, defaults to None
+# :param pkg_name: optional field for library, defaults to "transformers"
+# :param path_format: Retrieve just the code name, or the full module path and code name within the package
+# :return: A list of all code names, or the one corresponding to the provided class"""
+# from mir.generate.diffusers import IMPORT_STRUCTURE
+# from mir.generate.transformers import MODEL_MAPPING_NAMES
- package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
- pkg_name = pkg_name.lower()
- MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
- if module_name:
- if isinstance(module_name, Type):
- module_name = module_name.__name__
- code_name = next(iter(key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
- return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
- return list(MAPPING_NAMES)
+# package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
+# pkg_name = pkg_name.lower()
+# MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
+# if module_name:
+# if isinstance(module_name, Type):
+# module_name = module_name.__name__
+# code_name = next(iter(key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
+# return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
+# return list(MAPPING_NAMES)
def to_domain_tag(transformers: bool = False, **kwargs):
@@ -112,8 +110,9 @@ def to_domain_tag(transformers: bool = False, **kwargs):
:param transformers: Use transformers data instead of diffusers data, defaults to False
:raises ValueError: Model type not detected
:return: MIR prefix based on model configuration"""
+ from mir.data import NN_FILTER
- data = REGEX
+ data = NN_FILTER
if transformers:
flags = data["arch"]["transformer"] # pylint:disable=unsubscriptable-object
diff --git a/mir/generate/indexers.py b/mir/generate/indexers.py
index 8ef00f3..51f755a 100644
--- a/mir/generate/indexers.py
+++ b/mir/generate/indexers.py
@@ -4,7 +4,34 @@
"""類發現和拆卸"""
# pylint:disable=no-name-in-module
-from mir.generate import MIGRATIONS
+from mir import NFO
+from mir.data import MIGRATIONS
+from mir.maid import MIRDatabase
+from mir.spec import mir_entry
+
+
+def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
+ """Generate MIR HF Hub model database
+ :param new_data: Data for the MIR database
+ :param mir_database: MIRDatabase instance
+ """
+ for series, comp_name in new_data.items():
+ id_segment = series.split(".")
+ for compatibility in comp_name:
+ # dbug(id_segment)
+ try:
+ mir_db.add(
+ mir_entry(
+ domain=id_segment[0],
+ arch=id_segment[1],
+ series=id_segment[2],
+ comp=compatibility,
+ **new_data[series][compatibility],
+ ),
+ )
+ except IndexError: # as error_log:
+ NFO(f"Failed to create series: {series} compatibility: {comp_name} ")
+ # dbug(error_log)
def migrations(repo_path: str):
diff --git a/mir/generate/transformers/__init__.py b/mir/generate/transformers/__init__.py
index cbdf6f8..e3f09b3 100644
--- a/mir/generate/transformers/__init__.py
+++ b/mir/generate/transformers/__init__.py
@@ -2,32 +2,15 @@
#
-from dataclasses import dataclass, field
-from typing import Callable
-
from transformers.models.auto.configuration_auto import CONFIG_MAPPING
from transformers.models.auto.modeling_auto import (
MODEL_MAPPING, # config: model map
MODEL_MAPPING_NAMES,
+ AutoModel,
)
-from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING_NAMES
+from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING
from mir.generate.from_module import show_init_fields_for
-
-@dataclass
-class ClassMapEntry:
- """Represents a structured entry of the name of the class and its associated attributes."""
-
- name: str
- model_name: str
- model: Callable
- config: Callable
- config_params: dict[str, list[str]] = field(init=False, default_factory=lambda: {})
- model_params: dict[str, list[str]] | None = None
-
- def __post_init__(self):
- if self.model:
- self.model_params = show_init_fields_for(self.model)
- if self.config:
- self.config_params = show_init_fields_for(self.config)
+AUTO_MAP = AutoModel._model_mapping
+REVERSE_MAP = AUTO_MAP._reverse_config_mapping
diff --git a/mir/generate/transformers/index.py b/mir/generate/transformers/index.py
index adc8c65..8c53762 100644
--- a/mir/generate/transformers/index.py
+++ b/mir/generate/transformers/index.py
@@ -1,216 +1,126 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-
-from typing import Callable
-
-from mir import NFO, DBUQ
-from mir.data import PARAMETERS
-from mir.generate.from_module import import_object_named, to_domain_tag
-from mir.generate.indexers import migrations
-from mir.tag import tag_model_from_repo
-from mir.generate.transformers import CONFIG_MAPPING, MODEL_MAPPING, TOKENIZER_MAPPING_NAMES, ClassMapEntry
-
-
-def mapped_cls(model_identifier: str):
- """Get model class from identifier without calling huggingface_hub.\n
- :param model_identifier: Model identifier like "bert-base-uncased" or "gpt2"
- :return: Model class (e.g., BertModel, GPT2Model)
- """
- import transformers
- from transformers.models.auto.configuration_auto import CONFIG_MAPPING_NAMES
- from transformers.models.auto.modeling_auto import MODEL_MAPPING, MODEL_MAPPING_NAMES
-
- code_name = model_identifier.split("/")[-1].split("-")[0].lower()
-
- model_class_name = MODEL_MAPPING_NAMES.get(code_name, None)
-
- config_class_name = CONFIG_MAPPING_NAMES.get(code_name)
- if config_class_name:
- config_class = getattr(transformers, config_class_name, None)
- if config_class:
- model_class = MODEL_MAPPING.get(config_class, None)
- if model_class:
- if isinstance(model_class, tuple):
- model_class = model_class[0]
- return model_class
-
- normalized = code_name.replace("_", "-")
- if normalized != code_name:
- if model_class_name := MODEL_MAPPING_NAMES.get(normalized, None):
- if isinstance(model_class_name, tuple):
- model_class_name = model_class_name[0]
- return getattr(transformers, model_class_name, None)
-
- return None
-
-
-def get_repo_from_class_map(class_map: ClassMapEntry) -> str | None:
- """The name of the repository that is associated with a transformers configuration class\n
- :param class_map: Transformers class information extracted from dependency
- :returns: A string matching the repo path for the class"""
-
- import re
-
- doc_attempt = []
- if hasattr(class_map.config, "forward"):
- doc_attempt = [getattr(class_map.config, "forward")]
- doc_attempt.append(class_map.config)
- for pattern in doc_attempt:
- doc_string = pattern.__doc__
- matches = re.findall(r"\[([^\]]+)\]", doc_string)
- if matches:
- try:
- repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
- except StopIteration as error_log:
- NFO(f"ERROR >>{matches} : LOG >> {error_log}")
- continue
- return repo_path
- return None
-
-
-def find_transformers_classes() -> list[ClassMapEntry]:
- """Eat the 🤗Transformers classes as a treat, leaving any tasty subclass class morsels neatly arranged as a dictionary.\n
- Nom.\n
- :return: Tasty mapping of subclasses to their class references"""
-
- model_data = []
- for config_name, config_obj in CONFIG_MAPPING.items():
- model_params = None
- if model_obj := MODEL_MAPPING.get(config_obj, None):
- if isinstance(model_obj, Callable):
- model_obj = (model_obj,)
- assert isinstance(model_obj, tuple), f"Expected model class object, got {model_obj} type {type(model_obj)}"
- for model_class in model_obj:
- if model_params and ("inspect" not in model_params["config"]) and ("deprecated" not in list(model_params["config"])):
- pass
- else:
- model_params = None
- model_name = model_class.__name__
- model_data.append(
- ClassMapEntry(
- name=config_name,
- model_name=model_name.split(".")[-1],
- model=model_class, # type: ignore
- config=config_obj,
- ),
- )
- return model_data
-
-
-def mir_tag_from_config(class_map: ClassMapEntry, repo_path: str) -> tuple[str, str, str]:
- """Change a transformers config class into a MIR series and comp\n
- :param class_map: Transformers class information extracted from dependency
- :param repo_path: The
- """
-
- mir_prefix = to_domain_tag(transformers=True, **class_map.config_params)
- if not mir_prefix:
- if class_map.model_params:
- if mir_prefix := to_domain_tag(transformers=True, **class_map.model_params):
- pass
- else:
- raise ValueError(f"Unable to determine MIR prefix from {class_map, repo_path}")
- else:
- raise ValueError(f"Unrecognized model type, no tag matched {class_map.name} with {class_map.config_params} or {class_map.model_params}")
- mir_prefix = "info." + mir_prefix
- if class_map.name != "funnel":
- mir_suffix, mir_comp = tag_model_from_repo(repo_path)
- else:
- mir_suffix, mir_comp = ["funnel", "*"]
- mir_series = mir_prefix + "." + mir_suffix
- return mir_series, mir_comp, mir_suffix
-
-
-def show_transformers_tasks(class_name: str | None = None, code_name: str | None = None) -> list[str]:
- """Retrieves a list of task classes associated with a specified transformer class.\n
- :param class_name: The name of the transformer class to inspect.
- :param pkg_type: The dependency for the module
- :param alt_method: Use an alternate method to return the classes
- :return: A list of task classes associated with the specified transformer."""
-
- task_classes = None
-
- if not code_name:
- class_obj: Callable = import_object_named(class_name, "transformers")
- class_module: Callable = import_object_named(*class_obj.__module__.split(".", 1)[-1:], class_obj.__module__.split(".", 1)[0])
- if class_module and class_module.__name__ != "DummyPipe":
- task_classes = getattr(class_module, "__all__")
- else:
+from typing import Any, Callable
+
+from chanfig import NestedDict
+
+from mir.generate.transformers.raw_data import PrepareData
+
+
+class HarvestClasses:
+ def __init__(self) -> None:
+ """Initializes the HarvestClasses instance with an empty list to store raw class data."""
+ self.raw_data = []
+ from mir.maid import MIRDatabase
+
+ self.mir_db = MIRDatabase()
+        self.info = NestedDict({})
+        self.find_transformers_classes()
+
+ def find_transformers_classes(self) -> None:
+ """Finds and collects PrepareData entries for all transformer classes defined in AUTO_MAP.\n
+ :return: List of PrepareData entries representing the transformer classes."""
+
+ from mir.generate.transformers import AUTO_MAP
+
+ model_data = []
+ for pair_map in AUTO_MAP.items():
+ config_class, model_class = pair_map # type:ignore
+ if isinstance(model_class, tuple):
+ model_class: Callable = model_class[0]
+ print(model_class)
+ if config_data := self.extract_config_class_data(config_class):
+ if model_data := self.extract_model_class_data(model_class):
+ if prepared_data := PrepareData(**config_data, **model_data): # type:ignore
+ self.add_to_database(prepared_data)
+
+ def extract_config_class_data(self, config_class: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
+ """Extracts information from config classes.\n
+ :param config_class: Model class or callable returning model classes.
+ :return: dictionary of discovered elements"""
+ from mir.data import MIGRATIONS, PARAMETERS
+ from mir.generate.from_module import show_init_fields_for
+
+ config_name = config_class.__name__
+ config_params = PARAMETERS.get(config_name, {})
+ repo_path = MIGRATIONS["config"].get(config_name, {})
+ if not config_params:
+ config_params = show_init_fields_for(config_class)
+ if not repo_path:
+ repo_path = self.config_to_repo(config_class)
+ if not repo_path or not config_params or "inspect" in config_params or "deprecated" in config_params:
return None
- elif code_name:
- from httpx import HTTPStatusError
-
- from mir.generate.transformers.index import mapped_cls
-
- try:
- model_class = mapped_cls(code_name)
- if model_class is not None:
- # Convert class type to list containing the class name string
- task_classes = [model_class.__name__]
- else:
- return None
- except (OSError, HTTPStatusError) as e:
- DBUQ(f"Error mapping class {code_name}: {e}")
+ return {
+ "name": config_name,
+ "config": config_class,
+ "config_params": config_params,
+ "repo_path": repo_path,
+ }
+
+ def extract_model_class_data(self, model_class: Callable) -> dict[str, str | Any] | None:
+ """Extracts information from model classes.\n
+ :param model_class: Model class or callable returning model classes.
+ :return: dictionary of discovered elements"""
+        from mir.generate.from_module import show_init_fields_for
+
+ model_data: dict[str, str | Any] = {"model": model_class}
+ model_params = show_init_fields_for(model_class)
+ if "inspect" in model_params or "deprecated" in model_params:
return None
-
- return task_classes
-
-
-def transformers_index():
- """Generate LLM model data for MIR index\n
- :return: Dictionary ready to be applied to MIR data fields"""
-
- missing_config_params = PARAMETERS
-
- mir_data = {}
- transformers_data: list[ClassMapEntry] = find_transformers_classes()
- for entry in transformers_data:
- repo_path = get_repo_from_class_map(entry)
- if entry.name == "bert":
- print(entry)
- if config := missing_config_params.get(entry.name, {}):
- entry.config_params = config.get("params", entry.config_params)
- repo_path = config.get("repo_path", repo_path)
- if entry.name == "bert":
- print(entry)
- if not repo_path:
- raise ValueError(f"Unable to determine repo from {entry}")
- if entry.config_params:
- mir_series, mir_comp, mir_suffix = mir_tag_from_config(entry, repo_path)
- # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
-
- repo_path = migrations(repo_path)
- tk_pkg = {}
- tokenizer_classes = TOKENIZER_MAPPING_NAMES.get(entry.name)
- if isinstance(tokenizer_classes, str):
- tokenizer_classes = [tokenizer_classes]
- # mode = modalities.get("mode")
- if tokenizer_classes:
- index = 0
- for tokenizer in tokenizer_classes:
- if tokenizer:
- tokenizer_class = import_object_named(tokenizer, "transformers")
- tk_pkg.setdefault(index, {"transformers": f"{tokenizer_class.__module__}.{tokenizer_class.__name__}"})
- index += 1
- if tk_pkg:
- mir_data.get("info.encoder.tokenizer", mir_data.setdefault("info.encoder.tokenizer", {})).update(
- {
- mir_suffix: {
- "pkg": tk_pkg,
- }
- },
- )
- mir_data.setdefault(
- mir_series,
+ else:
+ return model_data | {
+ "model_params": model_params,
+ }
+
+ def config_to_repo(self, config_class: Callable) -> str | None:
+ """Extracts the repository path from the configuration class documentation.\n
+ :param config_class: Configuration class to extract repository path from.
+ :return: Repository path as a string if found, otherwise None."""
+ import re
+
+ from mir import NFO
+
+ doc_check = [config_class]
+ if hasattr(config_class, "forward"):
+ doc_check.append(config_class.forward) # type: ignore
+ for pattern in doc_check:
+ doc_string = pattern.__doc__
+ repo_brackets = r"\[([^\]]+)\]"
+ matches = re.findall(repo_brackets, doc_string) # type: ignore
+ if matches:
+ try:
+                    return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
+ except StopIteration as error_log:
+ NFO(f"ERROR >>{matches} : LOG >> {error_log}")
+ continue
+
+ def add_to_database(self, prepared_data: PrepareData) -> None:
+ if hasattr(prepared_data, "tokenizer"):
+ token_info = NestedDict(
{
- mir_comp: {
- "repo": repo_path,
- "pkg": {
- 0: {"transformers": entry.model_name},
+ "encoder": {
+ "tokenizer": {
+ prepared_data.mir_comp: {
+ "pkg": {f"{prepared_data.tokenizer.__module__}.{prepared_data.tokenizer.__name__}"},
+ },
},
- # "mode": mode,
},
- },
+ }
)
- return mir_data
+
+ info = NestedDict(
+ {
+ prepared_data.mir_arch: {
+ prepared_data.mir_series: {
+ prepared_data.mir_comp: {
+ "repo": prepared_data.repo_path,
+ "pkg": {"transformers": prepared_data.model_name},
+ "tokenizer": {f"info.encoder.tokenizer.{prepared_data.mir_comp}"},
+ }
+ }
+ }
+ }
+ )
+        self.info = getattr(self, "info", NestedDict({})) | ((token_info | info) if hasattr(prepared_data, "tokenizer") else info)
+ print(f"added {prepared_data}")
diff --git a/mir/generate/transformers/raw_data.py b/mir/generate/transformers/raw_data.py
new file mode 100644
index 0000000..39cce6d
--- /dev/null
+++ b/mir/generate/transformers/raw_data.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from dataclasses import dataclass, field
+from typing import Callable
+
+
+@dataclass
+class PrepareData:
+ """Represents a structured entry of the name of the class and its associated attributes."""
+
+ name: str
+ model: Callable
+ config: Callable
+ repo_path: str
+ config_params: dict[str, list[str]]
+ model_params: dict[str, list[str]] | None = None
+ mir_arch: str = field(init=False)
+ mir_series: str = field(init=False)
+ mir_comp: str = field(init=False)
+
+ def __post_init__(self) -> None:
+ """Initializes the PrepareData instance by setting derived attributes."""
+ from mir.generate.transformers import REVERSE_MAP, TOKENIZER_MAPPING
+
+ self.model_name: str = self.model.__name__.split(".")[-1]
+ if tokenizer := TOKENIZER_MAPPING.get(self.config, None):
+ self.tokenizer = tokenizer
+ self.tokenizer_pkg: dict[str, str] | None = {"transformers": f"{self.tokenizer.__module__}.{self.tokenizer.__name__}"}
+ if internal_name := REVERSE_MAP.get(self.config):
+ self.internal_name = internal_name
+ self.model_to_tasks()
+ self.mir_tag_from_config()
+
+ def model_to_tasks(self) -> None:
+ """Transform a single model class into derivative classes for specific tasks.\n
+ :return: A list of task classes associated with the model."""
+ from pathlib import Path
+ from importlib import import_module
+
+ import_path = Path(self.model.__module__).stem
+ parent_module = import_module(import_path)
+
+ if hasattr(parent_module, "__all__") and parent_module.__name__ != "DummyPipe":
+ self.task_classes = parent_module.__all__
+ else:
+ self.task_classes = [self.model.__name__]
+
+ def mir_tag_from_config(self) -> None:
+ """Generates MIR series and component tags based on the configuration class.\n
+ :return: Tuple containing MIR series, component, and suffix tags."""
+
+ from mir.generate.from_module import to_domain_tag
+ from mir.tag import tag_model_from_repo
+
+ mir_prefix = to_domain_tag(transformers=True, **self.config_params)
+        if not mir_prefix:
+            if self.model_params:
+                mir_prefix = to_domain_tag(transformers=True, **self.model_params)
+                if not mir_prefix:
+                    raise ValueError(f"Unable to determine MIR prefix from {self}")
+            else:
+                raise ValueError(f"Unrecognized model type, no tag matched {self.name} with {self.config_params} or {self.model_params}")
+ self.mir_arch = mir_prefix
+ self.mir_series, self.mir_comp = tag_model_from_repo(self.repo_path)
diff --git a/mir/generate/transformers/tokenizers.py b/mir/generate/transformers/tokenizers.py
new file mode 100644
index 0000000..a395d0e
--- /dev/null
+++ b/mir/generate/transformers/tokenizers.py
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+import re
+from importlib import import_module
+
+from mir.generate.transformers import TOKENIZER_MAPPING
+from mir.maid import MIRDatabase
+from mir.spec import mir_entry
+
+
+def tag_tokenizers(config_class: Callable):
+ tokenizer_class = TOKENIZER_MAPPING[config_class] # type: ignore
+ if tokenizer_class:
+ { "pkg":{"transformers": f"{tokenizer_class.__module__}.{tokenizer_class.__name__}"})
+ if tk_pkg:
+ mir_data.get("info.encoder.tokenizer", mir_data.setdefault("info.encoder.tokenizer", {})).update(
+ {
+ mir_suffix: {
+ "pkg": tk_pkg,
+ }
+ },
+ )
+ return tokenizer_class
diff --git a/mir/generate/write_to_mir.py b/mir/generate/write_to_mir.py
deleted file mode 100644
index 4976502..0000000
--- a/mir/generate/write_to_mir.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-from mir.spec import mir_entry
-from mir import NFO
-from mir.maid import MIRDatabase
-
-
-def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
- """Generate MIR HF Hub model database
- :param new_data: Data for the MIR database
- :param mir_database: MIRDatabase instance
- """
- for series, comp_name in new_data.items():
- id_segment = series.split(".")
- for compatibility in comp_name:
- # dbug(id_segment)
- try:
- mir_db.add(
- mir_entry(
- domain=id_segment[0],
- arch=id_segment[1],
- series=id_segment[2],
- comp=compatibility,
- **new_data[series][compatibility],
- ),
- )
- except IndexError: # as error_log:
- NFO(f"Failed to create series: {series} compatibility: {comp_name} ")
- # dbug(error_log)
diff --git a/mir/maid.py b/mir/maid.py
index a25a3eb..acdf86c 100644
--- a/mir/maid.py
+++ b/mir/maid.py
@@ -5,24 +5,26 @@
# pylint: disable=possibly-used-before-assignment, line-too-long
import os
-from typing import Any, Callable, List, Optional
+from typing import Any, List, Optional
-from mir.config.constants import MIR_PATH_NAMED
-from mir.config.json_io import read_json_file, write_json_file
-from mir.config.console import dbuq
+from mir import MIR_PATH_NAMED
+from mir.json_io import read_json_file, write_json_file
class MIRDatabase:
- """Machine Intelligence Resource Database"""
+ """Machine Intelligence Resource Database Object
+ Database search and read/write operations"""
def __init__(self, database: dict | None = None) -> None:
from json.decoder import JSONDecodeError
+ from mir import DBUQ
if not database:
+ self.database = {"expected": "data"}
try:
- self.database: dict[str, Any] = read_json_file(MIR_PATH_NAMED)
+ self.read_from_disk()
except JSONDecodeError as error_log:
- dbuq(error_log)
+ DBUQ(error_log)
self.database = {}
def add(self, resource: dict[str, Any]) -> None:
@@ -39,12 +41,10 @@ def add(self, resource: dict[str, Any]) -> None:
def write_to_disk(self, data: Optional[dict] = None) -> None: # pylint:disable=unused-argument
"""Save data to JSON file\n"""
- from mir.config.console import nfo
+ from mir import NFO
if not os.path.exists(MIR_PATH_NAMED):
mode = "x"
- if not self.database:
- self.database = {"expected": "data"}
else:
mode = "w"
# except (FileNotFoundError, OSError) as error_log:
@@ -52,7 +52,7 @@ def write_to_disk(self, data: Optional[dict] = None) -> None: # pylint:disable=
write_json_file(os.path.dirname(MIR_PATH_NAMED), file_name="mir.json", data=self.database, mode=mode)
written_data = self.read_from_disk()
- nfo(f"Wrote {len(written_data)} lines to MIR database file.")
+ NFO(f"Wrote {len(written_data)} lines to MIR database file.")
self.database = written_data
def read_from_disk(self, data: Optional[dict] = None) -> dict[str, Any]:
@@ -60,7 +60,8 @@ def read_from_disk(self, data: Optional[dict] = None) -> dict[str, Any]:
:param data: mir decorator auto-populated, defaults to None
:return: dict of MIR data"""
if not os.path.exists(MIR_PATH_NAMED):
- return {}
+ self.write_to_disk({})
+ return self.database
else:
self.database = read_json_file(MIR_PATH_NAMED)
return self.database
@@ -75,7 +76,7 @@ def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibilit
:return: A list of likely options and their MIR paths"""
import re
- from mir.config.constants import SEARCH_SUFFIX
+ from mir import SEARCH
results = []
if isinstance(maybe_match, str):
@@ -86,8 +87,8 @@ def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibilit
else:
maybe_match = list(maybe_match.keys())
for option in maybe_match:
- option_lower = re.sub(SEARCH_SUFFIX, "", option.lower())
- target = re.sub(SEARCH_SUFFIX, "", target.lower())
+ option_lower = re.sub(SEARCH, "", option.lower())
+ target = re.sub(SEARCH, "", target.lower())
if option_lower:
if option_lower:
if option_lower in target:
@@ -146,7 +147,7 @@ def find_tag(self, field: str, target: str, sub_field: Optional[str] = None, dom
:raises KeyError: Target string not found
"""
import re
- from mir.config.console import nfo
+ from mir import NFO
parameters = r"-gguf|-exl2|-exl3|-onnx|-awq|-mlx|-ov" #
target = target.lower().strip("-")
@@ -168,123 +169,5 @@ def find_tag(self, field: str, target: str, sub_field: Optional[str] = None, dom
if best_match := self.grade_maybes(self.matches, target):
return best_match
else:
- nfo(f"Query '{target}' not found when {len(self.database)}'{field}' options searched\n")
+ NFO(f"Query '{target}' not found when {len(self.database)}'{field}' options searched\n")
return None
-
-
-def main(mir_db: Callable | None = None, remake: bool = True) -> None:
- """Build the database"""
- from sys import modules as sys_modules
-
- if __name__ != "__main__" and "pytest" not in sys_modules: #
- import argparse
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
- usage="mir-maid",
- epilog="""Does NOT include results of `mir-task` and `mir-pipe`. These commands should be run separately. Output:
- 2025-08-03 14:22:47 INFO ('Wrote 0 lines to MIR database file.',)
- 2025-08-03 14:22:47 INFO ('Wrote #### lines to MIR database file.',)""",
- )
- parser.add_argument(
- "-r",
- "--remake_off",
- action="store_true",
- default=False,
- help="Prevent erasing and remaking the MIR database file (default: False, always start from a completely empty MIR file)",
- )
-
- args = parser.parse_args()
- remake = not args.remake_off
-
- from mir.automata import (
- add_mir_audio,
- add_mir_diffusion,
- add_mir_dtype,
- add_mir_llm,
- add_mir_lora,
- add_mir_schedulers,
- add_mir_vae,
- hf_pkg_to_mir,
- mir_update,
- )
- from mir.config.json_io import write_json_file
-
- if remake:
- os.remove(MIR_PATH_NAMED)
- folder_path_named = os.path.dirname(MIR_PATH_NAMED)
- mode = "x"
- else:
- mode = "w"
- write_json_file(folder_path_named, file_name="mir.json", data={"expected": "data"}, mode=mode)
- mir_db = MIRDatabase()
- mir_db.database.pop("expected", {})
- hf_pkg_to_mir(mir_db)
- add_mir_dtype(mir_db)
- add_mir_schedulers(mir_db)
- add_mir_lora(mir_db)
- add_mir_audio(mir_db)
- add_mir_diffusion(mir_db)
- add_mir_llm(mir_db)
- add_mir_vae(mir_db)
- mir_db.write_to_disk()
- mir_db = MIRDatabase()
- mir_db = MIRDatabase()
- mir_update(mir_db)
- mir_db.write_to_disk()
-
-
-if __name__ == "__main__":
- remake: bool = True
- tasks = True
- pipes = True
-
- from sys import modules as sys_modules
-
- if "pytest" not in sys_modules: #
- import argparse
-
- parser = argparse.ArgumentParser(
- formatter_class=argparse.RawTextHelpFormatter,
- description="Build a custom MIR model database from the currently installed system environment.\nOffline function.",
- usage="python -m nnll.mir.maid",
- epilog="""Includes `mir-task` and `mir-pipe` by default. Output:
- 2025-08-15 19:41:18 INFO ('Wrote 0 lines to MIR database file.',)
- 2025-08-15 19:38:48 INFO ('Wrote ### lines to MIR database file.',)
- INFO ('Wrote ### lines to MIR database file.',)
- INFO ('Wrote ### lines to MIR database file.',)""",
- )
- parser.add_argument(
- "-r",
- "--remake_off",
- action="store_true",
- default=False,
- help="Don't erase and remake the MIR database (default: False)",
- )
- parser.add_argument(
- "-t",
- "--tasks_off",
- action="store_true",
- default=False,
- help="Don't append task information to the MIR database (default: False)",
- )
- parser.add_argument(
- "-p",
- "--pipes_off",
- action="store_true",
- default=False,
- help="Don't append pipeline information to the MIR database (default: False)",
- )
-
- args = parser.parse_args()
- remake = not args.remake_off
- tasks = not args.tasks_off
- pipes = not args.pipes_off
-
- main(remake=remake)
- update_mir()
- from mir.inspect.tasks import pipe, run_task
-
- mir_db = run_task()
- pipe(mir_db)
diff --git a/data/mir.json b/mir/mir.json
similarity index 100%
rename from data/mir.json
rename to mir/mir.json
diff --git a/mir/spec/docstring_patterns.json b/mir/spec/docstring_patterns.json
deleted file mode 100644
index 691ab3c..0000000
--- a/mir/spec/docstring_patterns.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "uncommon_naming": {
- "blip_diffusion": "blip_diffusion",
- "cogvideo": "cogvideox",
- "cogview3": "cogview3plus",
- "deepfloyd_if": "if",
- "cosmos": "cosmos2_text2image",
- "visualcloze": "visualcloze_generation",
- "marigold": "marigold_depth"
- },
- "exclusion_list": [
- "auto_pipeline",
- "consistency_models",
- "pipeline_utils",
- "deprecated",
- "ddim",
- "ddpm",
- "deprecated",
- "autopipeline",
- "dance_diffusion",
- "diffusionpipeline",
- "dit",
- "latent_consistency_models",
- "latent_diffusion",
- "ledits_pp",
- "pag",
- "paint_by_example",
- "semantic_stable_diffusion",
- "stable_diffusion_attend_and_excite",
- "stable_diffusion_diffedit",
- "stable_diffusion_k_diffusion",
- "stable_diffusion_panorama",
- "stable_diffusion_safe",
- "stable_diffusion_sag",
- "t2i_adapter",
- "text_to_video_synthesis",
- "unclip",
- "unidiffuser",
- "controlnet_hunyuandit"
- ]
-}
\ No newline at end of file
diff --git a/mir/spec/missing_params.json b/mir/spec/missing_params.json
deleted file mode 100644
index c3aebdc..0000000
--- a/mir/spec/missing_params.json
+++ /dev/null
@@ -1,73 +0,0 @@
-{
- "bark": {
- "repo_path": "suno/bark",
- "params": {
- "n_head": [
- ""
- ]
- }
- },
- "aria_text": {
- "repo_path": "rhymes-ai/Aria-Chat",
- "params": {
- "vision_config": [
- ""
- ],
- "text_config": [
- ""
- ]
- }
- },
- "cwm": {
- "repo_path": "facebook/cwm",
- "params": {
- "n_head": [
- ""
- ]
- }
- },
- "decision_transformer": {
- "repo_path": "edbeeching/decision-transformer-gym-hopper-medium"
- },
- "distilbert": {
- "repo_path": "distilbert-base-uncased"
- },
- "gpt_bigcode": {
- "repo_path": "bigcode/gpt_bigcode-santacoder"
- },
- "granite": {
- "repo_path": "ibm-granite/granite-3.3-2b-base"
- },
- "granitemoe": {
- "repo_path": "ibm-research/PowerMoE-3b"
- },
- "granitemoehybrid": {
- "repo_path": "ibm-granite/granite-4.0-h-small"
- },
- "musicgen": {
- "repo_path": "facebook/musicgen-small"
- },
- "seamless_m4t_v2": {
- "repo_path": "facebook/seamless-m4t-v2-large"
- },
- "timm_backbone": {
- "repo_path": "microsoft/resnet-50"
- },
- "gpt_oss": {
- "repo_path": "openai/gpt-oss-120b"
- },
- "bert": {
- "repo_path": "google-bert/bert-base-uncased"
- },
- "timm_wrapper": {
- "repo_path": "timm/resnet18.a1_in1k",
- "params": {
- "_resnet_": [
- ""
- ]
- }
- },
- "vision-text-dual-encoder": {
- "repo_path": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
- }
-}
\ No newline at end of file
diff --git a/mir/spec/repo_migrations.json b/mir/spec/repo_migrations.json
deleted file mode 100644
index 799f906..0000000
--- a/mir/spec/repo_migrations.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
- "/helium-2b": "/helium-1-2b",
- "allenai/Olmo2-7B-1124-hf": "allenai/Olmo-2-1124-7B",
- "apple/mobilevitv2-1.0": "apple/mobilevitv2-1.0-imagenet1k-256",
- "caidas/swin2SR-classical-sr-x2-64": "caidas/swin2SR-classical-sr-x2-64",
- "facebook/hiera-base-224": "facebook/hiera-base-224-hf",
- "facebook/sam_hq-vit-huge": "syscv-community/sam-hq-vit-huge",
- "facebook/vit_msn_base": "facebook/vit-msn-base",
- "facebook/wav2vec2-bert-rel-pos-large": "facebook/w2v-bert-2.0",
- "google/gemma-3-4b": "google/gemma-3-4b-it",
- "google/gemma2-7b": "google/gemma-2-9b",
- "google/gemma3_text-7b": "google/gemma-3-12b-it",
- "IDEA-Research/dab_detr-base": "IDEA-Research/dab-detr-resnet-50",
- "LGAI-EXAONE/EXAONE-4.0-Instruct": "LGAI-EXAONE/EXAONE-4.0-32B",
- "meta/chameleon-7b'": "facebook/chameleon-7b",
- "mixtralai/Mixtral-8x7B": "mistralai/Mixtral-8x7B-v0.1",
- "paligemma-hf/paligemma-2b": "google/paligemma2-3b-mix-224",
- "pixtral-hf/pixtral-9b": "mistralai/Pixtral-12B-Base-2409",
- "Qwen/Qwen2-7B-beta": "Qwen/Qwen2-7B",
- "Qwen/Qwen3-15B-A2B": "Qwen/Qwen3-30B-A3B",
- "s-JoL/Open-Llama-V1": "openlm-research/open_llama_3b",
- "Salesforce/instruct-blip-flan-t5": "Salesforce/instructblip-flan-t5-xl",
- "state-spaces/mamba2-2.8b": "AntonV/mamba2-2.7b-hf",
- "ibm-fms/FalconH1-9.8b-2.2T-hf": "tiiuae/Falcon-H1-34B-Instruct",
- "nvidia/nemotron-3-8b-base-4k-hf": "mgoin/nemotron-3-8b-chat-4k-sft-hf",
- "THUDM/": "zai-org/",
- "THUDM/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air",
- "zai-org/GLM-4-100B-A10B": "zai-org/GLM-4.5-Air"
-}
\ No newline at end of file
diff --git a/mir/tag.py b/mir/tag.py
index 6cb4d16..38b8929 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -2,17 +2,15 @@
#
from typing import Any
-from mir import PARAMETERS, BREAKING, SEARCH
-def tag_model_from_repo(repo_title: str, decoder=False, data: dict | None = None) -> tuple[str, Any]:
+def tag_model_from_repo(repo_title: str, decoder=False) -> tuple[str, Any]:
"""Create a mir label from a repo path\n
:param mir_prefix: Known period-separated prefix and model type
:param repo_path: Typical remote source repo path, A URL without domain
:return: The assembled mir tag with compatibility pre-separated"""
import re
-
- # print(repo_title)
+ from mir import PARAMETERS, BREAKING
root = "decoder" if decoder else "*"
repo_title = repo_title.split(":latest")[0]
diff --git a/pyproject.toml b/pyproject.toml
index 4c33193..7d95cc5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,7 @@ classifiers = [
"Topic :: Scientific/Engineering :: Artificial Intelligence",
]
dependencies = [
+ "chanfig>=0.0.114",
"diffusers>=0.35.2",
"ftfy>=6.3.1",
"huggingface-hub[hf-xet]>=1.1.7",
diff --git a/uv.lock b/uv.lock
index c945785..3616533 100644
--- a/uv.lock
+++ b/uv.lock
@@ -37,6 +37,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" },
]
+[[package]]
+name = "chanfig"
+version = "0.0.114"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "lazy-imports" },
+ { name = "pyyaml" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5e/54/f34f58b4b883eb22594246f0da686f3f71d88c2614eee7d4551345411641/chanfig-0.0.114.tar.gz", hash = "sha256:50de7928d29e048042c1c62affbc3d8e3fd31b91ae4e1670bf10478a718ba9c0", size = 6416742, upload-time = "2025-12-16T08:38:16.136Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cd/ee/5e806a325abbfce8633828c396bd274ebdd0a1cfd4b43ad20671da69e0ee/chanfig-0.0.114-py3-none-any.whl", hash = "sha256:7b2332f0c89000e732e34569d0c6b98fb9ac3a3969ba54c8f95e3a9e074acc45", size = 59250, upload-time = "2025-12-16T08:38:14.048Z" },
+]
+
[[package]]
name = "charset-normalizer"
version = "3.4.4"
@@ -311,6 +325,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" },
]
+[[package]]
+name = "lazy-imports"
+version = "1.2.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/25/67/04432aae0c1e2729bff14e1841f4a3fb63a9e354318e66622251487760c3/lazy_imports-1.2.0.tar.gz", hash = "sha256:3c546b3c1e7c4bf62a07f897f6179d9feda6118e71ef6ecc47a339cab3d2e2d9", size = 24470, upload-time = "2025-12-28T13:51:51.218Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cd/62/60ed24fa8707f10c1c5aef94791252b820be3dd6bdfc6e2fcdb08bc8912f/lazy_imports-1.2.0-py3-none-any.whl", hash = "sha256:97134d6552e2ba16f1a278e316f05313ab73b360e848e40d593d08a5c2406fdf", size = 18681, upload-time = "2025-12-28T13:51:49.802Z" },
+]
+
[[package]]
name = "markdown2"
version = "2.5.4"
@@ -399,6 +422,7 @@ name = "mir"
version = "0.0.1"
source = { editable = "." }
dependencies = [
+ { name = "chanfig" },
{ name = "diffusers" },
{ name = "ftfy" },
{ name = "huggingface-hub", extra = ["hf-xet"] },
@@ -422,6 +446,7 @@ dev = [
[package.metadata]
requires-dist = [
+ { name = "chanfig", specifier = ">=0.0.114" },
{ name = "diffusers", git = "https://github.com/huggingface/diffusers" },
{ name = "ftfy", specifier = ">=6.3.1" },
{ name = "huggingface-hub", extras = ["hf-xet"], specifier = ">=1.1.7" },
From 846037a94046b67ee8c3315ad952f7ab8e003328 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Wed, 14 Jan 2026 22:03:18 -0500
Subject: [PATCH 07/16] ~nice API
---
mir/__init__.py | 12 +-
mir/data/nn_filter.json | 4 +-
mir/generate/automata.py | 54 -------
mir/generate/from_module.py | 9 +-
mir/generate/tasks.py | 8 +-
mir/generate/transformers/__init__.py | 2 +-
.../transformers/{index.py => harvest.py} | 56 ++-----
mir/generate/transformers/raw_data.py | 40 ++---
mir/generate/transformers/tokenizers.py | 24 ---
mir/maid.py | 71 ++++++---
mir/tag.py | 143 +++++++++++++-----
11 files changed, 199 insertions(+), 224 deletions(-)
rename mir/generate/transformers/{index.py => harvest.py} (69%)
delete mode 100644 mir/generate/transformers/tokenizers.py
diff --git a/mir/__init__.py b/mir/__init__.py
index 1922713..c688644 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -1,9 +1,12 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
+
import os
+from importlib import import_module
+from logging import DEBUG, INFO, Logger
from mir.json_io import read_json_file
-from logging import DEBUG, INFO, Logger
+from mir.generate.transformers.harvest import HarvestClasses
NFO = Logger(INFO).info
DBUQ = Logger(DEBUG).debug
@@ -17,3 +20,10 @@
SEMANTIC = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["semantic"]
SUFFIX = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["suffix"]
IGNORE = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["ignore"]
+
+
+tag_name = lambda path: path.rsplit(".", 1)
+mir_run = lambda parts: getattr(import_module(parts[0]), parts[1])
+
+
+Mir = HarvestClasses().db.db
diff --git a/mir/data/nn_filter.json b/mir/data/nn_filter.json
index 66e5fc3..4638ce1 100644
--- a/mir/data/nn_filter.json
+++ b/mir/data/nn_filter.json
@@ -17,7 +17,7 @@
"scheduler": "",
"resnet": ""
},
- "transformer": {
+ "transformers": {
"mlp": [
"prediction_channel_indices"
],
@@ -139,7 +139,7 @@
"tie_codebooks_embeddings"
]
},
- "diffuser": {
+ "diffusers": {
"lora": [
"motion_adapter"
],
diff --git a/mir/generate/automata.py b/mir/generate/automata.py
index da548b5..ad5b0c8 100644
--- a/mir/generate/automata.py
+++ b/mir/generate/automata.py
@@ -28,64 +28,10 @@
sd3_series, sd3_comp = tag_model_from_repo("stable-diffusion-3.5-medium") #
-def assimilate(mir_db: MIRDatabase, data_tuple: List[Tuple[Dict[str, Any]]]) -> None:
- """Merge new data into a pre-generated MIR database, updating while preserving existing data structures.\n
- :param mir_db: The MIRDatabase instance
- :param data_tuple: A list of tuples, each containing:\n
- - arch (str): The architecture name
- - series (str): The series name
- - `new_data`: New data to be merged into the database.
- :raises TypeError: If any field in `new_data` is not a dictionary.
- """
-
- def update_nested_dict(target, source):
- for key, value in source.items():
- if isinstance(value, dict) and key in target:
- if isinstance(target, dict):
- update_nested_dict(target[key], value)
- else:
- if isinstance(source, dict):
- # dbuq(target)
- target.setdefault(key, value)
- else:
- target = {key: value}
-
- dbuq(f"{data_tuple}, {len(data_tuple)}")
- for arch, series, new_data in data_tuple:
- mir_data = mir_db.database[f"{arch}.{series}"]
- for comp, field_data in new_data.items():
- if not isinstance(field_data, dict):
- raise TypeError(f"{field_data} <-- Cannot combine with database: Not `dict()`")
-
- # dbuq(f"{arch}.{series} : {comp}")
- update_nested_dict(mir_data.setdefault(comp, {}), field_data)
-
- if series == sdxl_series:
- for field, field_data in field_data.items():
- if isinstance(field_data, dict):
- for definition, sub_def_data in field_data.items():
- # dbug(definition)
- if isinstance(sub_def_data, dict):
- mir_data[comp][field].setdefault(definition, {})
- update_nested_dict(mir_data[comp][field][definition], sub_def_data)
-
-
# def auto_gan etc etc
# ai-forever/Real-ESRGAN
-def mir_update(mir_db: MIRDatabase, task_list: list = None, pipe_list: list = None):
- """Create mir unet info database"""
-
- additional_tags = [tag_pipe(*entry) for entry in diffusers_addons]
- additional_tags.extend([tag_base_model(*entry) for entry in transformers_addons])
-
- assimilate(
- mir_db, # format
- additional_tags,
- )
-
-
def add_mir_diffusion(mir_db: MIRDatabase):
"""Create MIR entries missing from the database"""
diff --git a/mir/generate/from_module.py b/mir/generate/from_module.py
index bbc6288..a39778f 100644
--- a/mir/generate/from_module.py
+++ b/mir/generate/from_module.py
@@ -105,19 +105,14 @@ def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
# return list(MAPPING_NAMES)
-def to_domain_tag(transformers: bool = False, **kwargs):
+def to_domain_tag(library: str, **kwargs):
"""Set type of MIR prefix depending on model type\n
:param transformers: Use transformers data instead of diffusers data, defaults to False
:raises ValueError: Model type not detected
:return: MIR prefix based on model configuration"""
from mir.data import NN_FILTER
- data = NN_FILTER
-
- if transformers:
- flags = data["arch"]["transformer"] # pylint:disable=unsubscriptable-object
- else:
- flags = data["arch"]["diffuser"] # pylint:disable=unsubscriptable-object
+ flags = NN_FILTER["arch"][library] # pylint:disable=unsubscriptable-object
for mir_prefix, key_match in flags.items():
if any(kwargs.get(param, None) for param in key_match):
return mir_prefix
diff --git a/mir/generate/tasks.py b/mir/generate/tasks.py
index 1e28e2e..5da5834 100644
--- a/mir/generate/tasks.py
+++ b/mir/generate/tasks.py
@@ -39,7 +39,7 @@ async def detect_tasks(self, mir_db: MIRDatabase, field_name: str = "pkg") -> di
:rtype: dict"""
data_tuple = []
- for series, compatibility_data in mir_db.database.items():
+ for series, compatibility_data in mir_db.db.items():
if (
series.startswith("info.") # formatting comment
and not any(tag for tag in self.skip_series if series.startswith(tag))
@@ -68,7 +68,7 @@ async def detect_pipes(self, mir_db: MIRDatabase, field_name: str = "pkg") -> di
:rtype: dict"""
data_tuple = []
- for series, compatibility_data in mir_db.database.items():
+ for series, compatibility_data in mir_db.db.items():
if (
series.startswith("info.") # formatting comment
and not any(series.startswith(tag) for tag in self.skip_series)
@@ -133,14 +133,14 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir
sub_field = pipe_class.__module__.split(".")[0]
scheduler_series, scheduler_comp = tag_scheduler(class_name)
mir_tag = [f"ops.scheduler.{scheduler_series}", scheduler_comp]
- if not mir_db.database.get(mir_tag[0], {}).get(mir_tag[1]):
+ if not mir_db.db.get(mir_tag[0], {}).get(mir_tag[1]):
mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="ops.scheduler")
DBUQ(f"scheduler {mir_tag} {class_name} {sub_field} ")
elif pipe_role == "vae":
sub_field = pipe_class.__module__.split(".")[0]
mir_comp = series.rsplit(".", 1)[-1]
DBUQ(mir_comp)
- mir_tag = [mir_id for mir_id, comp_data in mir_db.database.items() if "info.vae" in mir_id and next(iter(comp_data)) == mir_comp]
+ mir_tag = [mir_id for mir_id, comp_data in mir_db.db.items() if "info.vae" in mir_id and next(iter(comp_data)) == mir_comp]
if mir_tag:
mir_tag.append(mir_comp) # keep mir tag as single list
elif class_name != "AutoencoderKL":
diff --git a/mir/generate/transformers/__init__.py b/mir/generate/transformers/__init__.py
index e3f09b3..9eaedd4 100644
--- a/mir/generate/transformers/__init__.py
+++ b/mir/generate/transformers/__init__.py
@@ -10,7 +10,7 @@
)
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING
-from mir.generate.from_module import show_init_fields_for
+from mir.generate.transformers.harvest import HarvestClasses
AUTO_MAP = AutoModel._model_mapping
REVERSE_MAP = AUTO_MAP._reverse_config_mapping
diff --git a/mir/generate/transformers/index.py b/mir/generate/transformers/harvest.py
similarity index 69%
rename from mir/generate/transformers/index.py
rename to mir/generate/transformers/harvest.py
index 8c53762..c36cd92 100644
--- a/mir/generate/transformers/index.py
+++ b/mir/generate/transformers/harvest.py
@@ -3,25 +3,22 @@
from typing import Any, Callable
-from chanfig import NestedDict
-
from mir.generate.transformers.raw_data import PrepareData
+from mir.tag import MIRTag
class HarvestClasses:
def __init__(self) -> None:
"""Initializes the HarvestClasses instance with an empty list to store raw class data."""
- self.raw_data = []
from mir.maid import MIRDatabase
- self.mir_db = MIRDatabase()
+ self.db = MIRDatabase()
+ self.raw_data = []
self.find_transformers_classes()
- self.info = NestedDict({})
def find_transformers_classes(self) -> None:
"""Finds and collects PrepareData entries for all transformer classes defined in AUTO_MAP.\n
:return: List of PrepareData entries representing the transformer classes."""
-
from mir.generate.transformers import AUTO_MAP
model_data = []
@@ -29,11 +26,11 @@ def find_transformers_classes(self) -> None:
config_class, model_class = pair_map # type:ignore
if isinstance(model_class, tuple):
model_class: Callable = model_class[0]
- print(model_class)
if config_data := self.extract_config_class_data(config_class):
if model_data := self.extract_model_class_data(model_class):
if prepared_data := PrepareData(**config_data, **model_data): # type:ignore
- self.add_to_database(prepared_data)
+ mir_tag = MIRTag("info", prepared_data)
+ self.db.add_tag(mir_tag)
def extract_config_class_data(self, config_class: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
"""Extracts information from config classes.\n
@@ -44,12 +41,14 @@ def extract_config_class_data(self, config_class: Callable) -> dict[str, str | C
config_name = config_class.__name__
config_params = PARAMETERS.get(config_name, {})
- repo_path = MIGRATIONS["config"].get(config_name, {})
if not config_params:
config_params = show_init_fields_for(config_class)
+ repo_path = MIGRATIONS["config"].get(config_name, {})
if not repo_path:
repo_path = self.config_to_repo(config_class)
- if not repo_path or not config_params or "inspect" in config_params or "deprecated" in config_params:
+ if not repo_path or not config_params:
+ return None
+ elif "inspect" in config_params or "deprecated" in config_params:
return None
return {
"name": config_name,
@@ -86,41 +85,14 @@ def config_to_repo(self, config_class: Callable) -> str | None:
doc_check.append(config_class.forward) # type: ignore
for pattern in doc_check:
doc_string = pattern.__doc__
- repo_brackets = r"\[([^\]]+)\]"
- matches = re.findall(repo_brackets, doc_string) # type: ignore
+ matches = re.findall(r"\[([^\]]+)\]", doc_string) # type: ignore
if matches:
try:
- self.repo_path = next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
+ return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
except StopIteration as error_log:
NFO(f"ERROR >>{matches} : LOG >> {error_log}")
continue
- def add_to_database(self, prepared_data: PrepareData) -> None:
- if hasattr(prepared_data, "tokenizer"):
- token_info = NestedDict(
- {
- "encoder": {
- "tokenizer": {
- prepared_data.mir_comp: {
- "pkg": {f"{prepared_data.tokenizer.__module__}.{prepared_data.tokenizer.__name__}"},
- },
- },
- },
- }
- )
-
- info = NestedDict(
- {
- prepared_data.mir_arch: {
- prepared_data.mir_series: {
- prepared_data.mir_comp: {
- "repo": prepared_data.repo_path,
- "pkg": {"transformers": prepared_data.model_name},
- "tokenizer": {f"info.encoder.tokenizer.{prepared_data.mir_comp}"},
- }
- }
- }
- }
- )
- self.info = token_info | info
- print(f"added {prepared_data}")
+
+if __name__ == "__main__":
+ HarvestClasses()
diff --git a/mir/generate/transformers/raw_data.py b/mir/generate/transformers/raw_data.py
index 39cce6d..fdbe5dd 100644
--- a/mir/generate/transformers/raw_data.py
+++ b/mir/generate/transformers/raw_data.py
@@ -3,7 +3,7 @@
from dataclasses import dataclass, field
-from typing import Callable
+from typing import Callable, Any
@dataclass
@@ -12,13 +12,11 @@ class PrepareData:
name: str
model: Callable
- config: Callable
+ config: type
repo_path: str
config_params: dict[str, list[str]]
- model_params: dict[str, list[str]] | None = None
- mir_arch: str = field(init=False)
- mir_series: str = field(init=False)
- mir_comp: str = field(init=False)
+ model_params: dict[str, list[str]] | None = field(init=True, default_factory=lambda: {"": [""]})
+ tasks: list[str] = field(init=False, default_factory=lambda: [""])
def __post_init__(self) -> None:
"""Initializes the PrepareData instance by setting derived attributes."""
@@ -26,12 +24,10 @@ def __post_init__(self) -> None:
self.model_name: str = self.model.__name__.split(".")[-1]
if tokenizer := TOKENIZER_MAPPING.get(self.config, None):
- self.tokenizer = tokenizer
- self.tokenizer_pkg: dict[str, str] | None = {"transformers": f"{self.tokenizer.__module__}.{self.tokenizer.__name__}"}
+ self.tokenizer: tuple[type[Any] | None, type[Any] | None] = tokenizer
if internal_name := REVERSE_MAP.get(self.config):
self.internal_name = internal_name
self.model_to_tasks()
- self.mir_tag_from_config()
def model_to_tasks(self) -> None:
"""Transform a single model class into derivative classes for specific tasks.\n
@@ -41,26 +37,10 @@ def model_to_tasks(self) -> None:
import_path = Path(self.model.__module__).stem
parent_module = import_module(import_path)
-
+ self.tasks = []
if hasattr(parent_module, "__all__") and parent_module.__name__ != "DummyPipe":
- self.task_classes = parent_module.__all__
+ for module in parent_module.__all__:
+ if (module.lower() != module) and (module != self.model_name) and (module != self.config.__name__):
+ self.tasks.append(module)
else:
- self.task_classes = [self.model.__name__]
-
- def mir_tag_from_config(self) -> None:
- """Generates MIR series and component tags based on the configuration class.\n
- :return: Tuple containing MIR series, component, and suffix tags."""
-
- from mir.generate.from_module import to_domain_tag
- from mir.tag import tag_model_from_repo
-
- mir_prefix = to_domain_tag(transformers=True, **self.config_params)
- if not mir_prefix:
- if self.model_params:
- if mir_prefix := to_domain_tag(transformers=True, **self.model_params):
- pass
- raise ValueError(f"Unable to determine MIR prefix from {self}")
- else:
- raise ValueError(f"Unrecognized model type, no tag matched {self.name} with {self.config_params} or {self.model_params}")
- self.mir_arch = mir_prefix
- self.mir_series, self.mir_comp = tag_model_from_repo(self.repo_path)
+ self.tasks = [self.model.__name__]
diff --git a/mir/generate/transformers/tokenizers.py b/mir/generate/transformers/tokenizers.py
deleted file mode 100644
index a395d0e..0000000
--- a/mir/generate/transformers/tokenizers.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-import re
-from importlib import import_module
-
-from mir.generate.transformers import TOKENIZER_MAPPING
-from mir.maid import MIRDatabase
-from mir.spec import mir_entry
-
-
-def tag_tokenizers(config_class: Callable):
- tokenizer_class = TOKENIZER_MAPPING[config_class] # type: ignore
- if tokenizer_class:
- { "pkg":{"transformers": f"{tokenizer_class.__module__}.{tokenizer_class.__name__}"})
- if tk_pkg:
- mir_data.get("info.encoder.tokenizer", mir_data.setdefault("info.encoder.tokenizer", {})).update(
- {
- mir_suffix: {
- "pkg": tk_pkg,
- }
- },
- )
- return tokenizer_class
diff --git a/mir/maid.py b/mir/maid.py
index acdf86c..ef7ba78 100644
--- a/mir/maid.py
+++ b/mir/maid.py
@@ -9,34 +9,67 @@
from mir import MIR_PATH_NAMED
from mir.json_io import read_json_file, write_json_file
+from mir.tag import MIRTag
class MIRDatabase:
"""Machine Intelligence Resource Database Object
Database search and read/write operations"""
- def __init__(self, database: dict | None = None) -> None:
+ def __init__(self, db: dict | None = None) -> None:
+ from chanfig import NestedDict
from json.decoder import JSONDecodeError
from mir import DBUQ
- if not database:
- self.database = {"expected": "data"}
+ if not db:
+ self.db = NestedDict()
try:
self.read_from_disk()
except JSONDecodeError as error_log:
DBUQ(error_log)
- self.database = {}
+ self.db = NestedDict()
- def add(self, resource: dict[str, Any]) -> None:
- """Merge pre-existing MIR entries, or add new ones
- :param resource: Entry to apply
+ def add_tag(self, mir_tag: MIRTag):
+ """Add or update entry to MIR Database
+ :param prepared_data: An instance of PrepareData to convert into tags
"""
- parent_key = next(iter(resource))
- if self.database is not None:
- if self.database.get(parent_key, 0):
- self.database[parent_key] = self.database[parent_key] | resource[parent_key]
+ from chanfig import NestedDict
+
+ library = mir_tag.pkg.split(".")[0]
+ pkg = {library: (mir_tag.pkg,)}
+ if hasattr(mir_tag.data, "tokenizer") and mir_tag.data.tokenizer:
+ info = NestedDict({f"info.encoder.tokenizer.{mir_tag.series}": {mir_tag.tokenizer_pkg}})
+ self._update_data(self.db, info)
+ pkg = pkg | {"tokenizer": f"info.encoder.tokenizer.{mir_tag.series}"}
+ if hasattr(mir_tag, "comp"):
+ info = NestedDict({f"info.{mir_tag.arch}.{mir_tag.series}{mir_tag.comp}": pkg})
+ else:
+ info = NestedDict({f"info.{mir_tag.arch}.{mir_tag.series}": pkg})
+ self._update_data(self.db, info)
+
+ self.db = NestedDict(self.db)
+
+ def _update_data(self, target, source):
+ """Recursively merges `source` into `target` without overwriting nested dictionaries entirely."""
+
+ for key, value in source.items():
+ if isinstance(value, dict) and key in target and isinstance(target[key], dict):
+ self._update_data(target[key], value)
+ else:
+ # Update only if key doesn't exist or value is not a dict to avoid overwriting
+ if key not in target or not isinstance(target[key], dict):
+ target.setdefault(key, value)
+
+ # Handle cases where source might have non-dict values that should update target's non-dict values
+ for key in target:
+ if key not in source and isinstance(target[key], dict):
+ continue
+ elif key not in source and not isinstance(target[key], dict):
+ # If key exists in target but not in source and is not a dict, ensure it's preserved
+ pass
else:
- self.database[parent_key] = resource[parent_key]
+ # Additional logic if needed for specific conditions
+ pass
def write_to_disk(self, data: Optional[dict] = None) -> None: # pylint:disable=unused-argument
"""Save data to JSON file\n"""
@@ -50,10 +83,10 @@ def write_to_disk(self, data: Optional[dict] = None) -> None: # pylint:disable=
# except (FileNotFoundError, OSError) as error_log:
# nfo(f"MIR file not found before write, regenerating... {error_log}")
- write_json_file(os.path.dirname(MIR_PATH_NAMED), file_name="mir.json", data=self.database, mode=mode)
+ write_json_file(os.path.dirname(MIR_PATH_NAMED), file_name="mir.json", data=self.db, mode=mode)
written_data = self.read_from_disk()
NFO(f"Wrote {len(written_data)} lines to MIR database file.")
- self.database = written_data
+ self.db = written_data
def read_from_disk(self, data: Optional[dict] = None) -> dict[str, Any]:
"""Populate mir database\n
@@ -61,10 +94,10 @@ def read_from_disk(self, data: Optional[dict] = None) -> dict[str, Any]:
:return: dict of MIR data"""
if not os.path.exists(MIR_PATH_NAMED):
self.write_to_disk({})
- return self.database
+ return self.db
else:
- self.database = read_json_file(MIR_PATH_NAMED)
- return self.database
+ self.db = read_json_file(MIR_PATH_NAMED)
+ return self.db
def _stage_maybes(self, maybe_match: str, target: str, series: str, compatibility: str) -> list[str | bool]:
"""Process a single value for matching against the target\n
@@ -154,7 +187,7 @@ def find_tag(self, field: str, target: str, sub_field: Optional[str] = None, dom
target = re.sub(parameters, "", target)
self.matches = []
- for series, comp in self.database.items():
+ for series, comp in self.db.items():
if (not domain) or series.startswith(domain):
for compatibility, fields in comp.items():
if maybe_match := fields.get(field):
@@ -169,5 +202,5 @@ def find_tag(self, field: str, target: str, sub_field: Optional[str] = None, dom
if best_match := self.grade_maybes(self.matches, target):
return best_match
else:
- NFO(f"Query '{target}' not found when {len(self.database)}'{field}' options searched\n")
+ NFO(f"Query '{target}' not found when {len(self.db)}'{field}' options searched\n")
return None
diff --git a/mir/tag.py b/mir/tag.py
index 38b8929..495040e 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -1,43 +1,106 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import Any
-
-
-def tag_model_from_repo(repo_title: str, decoder=False) -> tuple[str, Any]:
- """Create a mir label from a repo path\n
- :param mir_prefix: Known period-separated prefix and model type
- :param repo_path: Typical remote source repo path, A URL without domain
- :return: The assembled mir tag with compatibility pre-separated"""
- import re
- from mir import PARAMETERS, BREAKING
-
- root = "decoder" if decoder else "*"
- repo_title = repo_title.split(":latest")[0]
- repo_title = repo_title.split(":Q")[0]
- repo_title = repo_title.split(r"/")[-1].lower()
- pattern = r"^.*[v]?(\d{1}+\.\d).*"
- match = re.findall(pattern, repo_title)
- if match:
- if next(iter(match)):
- repo_title = repo_title.replace(next(iter(match))[-1], "")
- parts = repo_title.replace(".", "").split("-")
- if len(parts) == 1:
- parts = repo_title.split("_")
- subtraction_prefixes = r"\d.b-|\-rl|tiny|large|mlx|onnx|gguf|medium|base|multimodal|mini|instruct|full|:latest|preview|small|pro|beta|hybrid|plus|dpo|community"
-
- pattern_2 = re.compile(PARAMETERS)
- clean_parts = [re.sub(pattern_2, "", segment.lower()) for segment in parts]
- cleaned_string = "-".join([x for x in clean_parts if x])
- cleaned_string = re.sub(subtraction_prefixes, "", cleaned_string)
- cleaned_string = re.sub("-it", "", cleaned_string.replace("-bit", "")).replace("--", "-")
- cleaned_string = cleaned_string.replace("-b-", "")
- # print(cleaned_string)
- suffix_match = re.findall(BREAKING, cleaned_string) # Check for breaking suffixes first
- if suffix_match:
- suffix = next(iter(suffix for suffix in suffix_match[0] if suffix))
- cleaned_string = re.sub(suffix.lower(), "-", cleaned_string).rstrip("-,")
- else:
- suffix = root
- cleaned_string = re.sub(r"[._]+", "-", cleaned_string.lower()).strip("-_")
- return (cleaned_string, suffix)
+from dataclasses import dataclass, field
+from typing import Callable
+
+from mir.generate.transformers.raw_data import PrepareData
+
+
+@dataclass
+class MIRTag:
+ """Represents a MIR tag associated with a specific domain and model data.\n
+
+ Attributes:\n
+ domain: The domain of the MIR tag.
+ prepared_data: Object containing prepared model data.
+ arch: The architecture component of the MIR tag (generated).
+ series: The series component of the MIR tag (generated).
+ pkg Package information associated with the MIR tag (generated).
+ tokenizer_pkg Dependency package information associated with the MIR tag (generated).
+ """
+
+ domain: str
+ data: PrepareData
+ arch: str = field(init=False)
+ series: str = field(init=False)
+ pkg: str = field(default_factory=str)
+ tokenizer_pkg: str = field(default_factory=str)
+
+ def __post_init__(self) -> None:
+ """Initializes MIRTag instance, setting up database connection and generating package and MIR tag information."""
+ from mir.maid import MIRDatabase
+
+ self.mir_db = MIRDatabase()
+ self.pkg = self.generate_pkg(pkg=self.data.model)
+ if hasattr(self.data, "tokenizer") and self.data.tokenizer:
+ self.tokenizer_pkg = self.generate_pkg(pkg=self.data.tokenizer) # type:ignore
+ self.generate_arch()
+ self.generate_series_and_comp(repo_title=self.data.repo_path)
+
+ def generate_pkg(self, pkg: Callable) -> str:
+ """Generates package information for the MIR tag based on class.
+ :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
+
+ return f"{pkg.__module__}.{pkg.__name__}"
+
+ def generate_arch(self) -> None:
+ """Generates the architecture part of the MIR tag based on prepared data.\n
+ :raises ValueError: If no suitable tag can be determined."""
+ from mir.generate.from_module import to_domain_tag
+
+ library = self.pkg.split(".")[0]
+ arch = to_domain_tag(library, **self.data.config_params)
+ if not arch:
+ if self.data.model_params:
+ if arch := to_domain_tag(library, **self.data.model_params):
+ pass
+ raise ValueError(f"Unable to determine MIR prefix from {self}")
+ else:
+ raise ValueError(
+ f"Unrecognized model type, \
+ no tag matched {self.data.name} \
+ with {self.data.config_params} or {self.data.model_params}",
+ )
+ self.arch = arch
+
+ def generate_series_and_comp(self, repo_title: str, decoder=False) -> None:
+ """Generates the MIR tag components from a repository title.\n
+ :param repo_title: The title of the repository from which to derive the MIR tag.
+ :param decoder: Boolean flag indicating if the model is a decoder.
+ :return: A tuple containing the cleaned tag string and suffix."""
+
+ import re
+
+ from mir import BREAKING, PARAMETERS
+
+ root = "decoder" if decoder else "*"
+ repo_title = repo_title.split(":latest")[0]
+ repo_title = repo_title.split(":Q")[0]
+ repo_title = repo_title.split(r"/")[-1].lower()
+ pattern = r"^.*[v]?(\d{1}+\.\d).*"
+ match = re.findall(pattern, repo_title)
+ if match:
+ if next(iter(match)):
+ repo_title = repo_title.replace(next(iter(match))[-1], "")
+ parts = repo_title.replace(".", "").split("-")
+ if len(parts) == 1:
+ parts = repo_title.split("_")
+ subtraction_prefixes = r"\d.b-|\-rl|tiny|large|mlx|onnx|gguf|medium|base|multimodal|mini|instruct|full|:latest|preview|small|pro|beta|hybrid|plus|dpo|community"
+
+ pattern_2 = re.compile(PARAMETERS)
+ clean_parts = [re.sub(pattern_2, "", segment.lower()) for segment in parts]
+ cleaned_string = "-".join([x for x in clean_parts if x])
+ cleaned_string = re.sub(subtraction_prefixes, "", cleaned_string)
+ cleaned_string = re.sub("-it", "", cleaned_string.replace("-bit", "")).replace("--", "-")
+ cleaned_string = cleaned_string.replace("-b-", "")
+ suffix_match = re.findall(BREAKING, cleaned_string) # Check for breaking suffixes first
+ if suffix_match:
+ suffix = next(iter(suffix for suffix in suffix_match[0] if suffix))
+ cleaned_string = re.sub(suffix.lower(), "-", cleaned_string).rstrip("-,")
+ else:
+ suffix = root
+ cleaned_string = re.sub(r"[.-]+", "_", cleaned_string.lower()).strip("-_")
+ self.series = cleaned_string
+ if suffix != "*":
+ self.comp = suffix
From 7f7bb0b3f67e77629c91a8497be00c77df28f5ff Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sat, 17 Jan 2026 15:46:35 -0500
Subject: [PATCH 08/16] ~transformers complete
---
mir/__init__.py | 4 +-
mir/framework.py | 114 ++++++++++++++++++
mir/generate/transformers/harvest.py | 14 ++-
mir/maid.py | 47 ++------
mir/package.py | 0
mir/tag.py | 36 ++----
tests/{ => old}/test_class_parent.py | 0
tests/{ => old}/test_deconstructors_root.py | 0
tests/{ => old}/test_doc_parser.py | 0
tests/{ => old}/test_find_docstring_run.py | 0
.../test_gather_diffusers_metadata.py | 0
tests/{ => old}/test_json_io.py | 0
tests/{ => old}/test_mir_db_create_restore.py | 0
tests/{ => old}/test_mir_merge.py | 0
tests/{ => old}/test_mir_search.py | 0
tests/{ => old}/test_mir_tagging.py | 0
tests/{ => old}/test_regex_constants.py | 0
tests/{ => old}/test_resolve_code_names.py | 0
tests/{ => old}/test_seek_class.py | 0
tests/{ => old}/test_task.py | 0
tests/{ => old}/test_taskanalyzer.py | 0
tests/test_mir_generate.py | 21 ++++
22 files changed, 170 insertions(+), 66 deletions(-)
create mode 100644 mir/framework.py
create mode 100644 mir/package.py
rename tests/{ => old}/test_class_parent.py (100%)
rename tests/{ => old}/test_deconstructors_root.py (100%)
rename tests/{ => old}/test_doc_parser.py (100%)
rename tests/{ => old}/test_find_docstring_run.py (100%)
rename tests/{ => old}/test_gather_diffusers_metadata.py (100%)
rename tests/{ => old}/test_json_io.py (100%)
rename tests/{ => old}/test_mir_db_create_restore.py (100%)
rename tests/{ => old}/test_mir_merge.py (100%)
rename tests/{ => old}/test_mir_search.py (100%)
rename tests/{ => old}/test_mir_tagging.py (100%)
rename tests/{ => old}/test_regex_constants.py (100%)
rename tests/{ => old}/test_resolve_code_names.py (100%)
rename tests/{ => old}/test_seek_class.py (100%)
rename tests/{ => old}/test_task.py (100%)
rename tests/{ => old}/test_taskanalyzer.py (100%)
create mode 100644 tests/test_mir_generate.py
diff --git a/mir/__init__.py b/mir/__init__.py
index c688644..4e83592 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -22,8 +22,8 @@
IGNORE = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["ignore"]
-tag_name = lambda path: path.rsplit(".", 1)
-mir_run = lambda parts: getattr(import_module(parts[0]), parts[1])
+tag = lambda path: path.rsplit(".", 1)
+run = lambda parts: getattr(import_module(parts[0]), parts[1])
Mir = HarvestClasses().db.db
diff --git a/mir/framework.py b/mir/framework.py
new file mode 100644
index 0000000..077f7ca
--- /dev/null
+++ b/mir/framework.py
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from typing import Any, Callable
+from dataclasses import dataclass, field
+from mir.generate.transformers.raw_data import PrepareData
+from mir.tag import MIRTag
+
+
+@dataclass
+class MIRPackage:
+ data: Callable | str | dict[str, str]
+ library: str = field(init=False, default_factory=str)
+ package: dict[str, dict[str, str]] = field(init=False, default_factory=dict[str, dict[str, str]])
+ framework: dict[str, dict[str, str]] = field(init=False, default_factory=dict[str, dict[str, str]])
+
+ def __init__(self):
+ pass
+
+ def __call__(self, data: Callable | str | dict[str, str]):
+ self.data = data
+ if isinstance(self.data, Callable):
+ self.generate_package()
+
+ def generate_package(self) -> None:
+ """Generates package information for the MIR tag based on class.
+ :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
+ self.domain = "ops"
+ module_path = self.data.__module__
+ self.library = module_path.split(".")[0]
+ self.package: dict[str, dict[str, str]] = {self.library: {"model": f"{module_path}.{self.data.__name__}"}}
+
+ def add_framework(self, framework_data) -> None:
+ self.domain = "info"
+ self.framework = {self.library: framework_data}
+
+
+class MIRNesting:
+ """Build tag components from the extracted data\n
+ :param mir_tag: An instance of MIR tag with the necessary information
+ :param name: Identification string to store data underneath
+ :param mir_package: Instance of MIRPackage to store inside the nested dict
+ :param prepared_data: Instance of PrepareData to attribute the final information
+ :returns: The final, assembled MIR tag"""
+
+ loops: list[str]
+ framework_data: dict[str, str | dict[str, Any]] = {}
+ repo: str | None = field(default_factory=str | None)
+ framework: dict[str, str] = field(init=False)
+ tokenizer: str | None = field(default_factory=str)
+
+ def __init__(self, mir_tag: MIRTag) -> None:
+ self.mir_tag = mir_tag
+ self.loops = []
+ self.framework_data = {}
+
+ def __call__(self, mir_package: MIRPackage, prepared_data: PrepareData | None = None):
+ if hasattr(mir_package, "library"):
+ self.library = mir_package.library
+ if prepared_data:
+ self.framework_data.setdefault("repo", prepared_data.repo_path)
+ if hasattr(mir_package, "tokenizer"):
+ name = "tokenizer"
+ self.package = mir_package.package
+ self.nest_data(
+ name=name,
+ domain=mir_package.domain,
+ arch="encoder",
+ series="tokenizer",
+ comp=self.mir_tag.series,
+ )
+ self.framework_data.setdefault("tokenizer", f"{mir_package.domain}.encoder.tokenizer.{self.mir_tag.series}")
+ else:
+ data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
+ if comp := getattr(self.mir_tag, "comp", None):
+ self.framework_data.setdefault("model", data + comp)
+ else:
+ self.framework_data.setdefault("model", data)
+
+ if hasattr(mir_package, "framework"):
+ name = "framework"
+ self.package = mir_package.framework
+ else:
+ name = "model"
+ self.package = mir_package.package
+ if hasattr(prepared_data, "tasks") and prepared_data.tasks:
+ self.package[mir_package.library].setdefault("tasks", prepared_data.tasks)
+ self.nest_data(
+ name=name,
+ domain=mir_package.domain,
+ arch=self.mir_tag.arch,
+ series=self.mir_tag.series,
+ comp=comp,
+ )
+ self.loops.append(name)
+
+ def nest_data(self, name: str, domain: str, arch: str, series: str, comp: str | None = None) -> None:
+ from chanfig import NestedDict
+
+ if comp:
+ nest = NestedDict({f"{domain}.{arch}.{series}": {comp: ""}})
+ nest[domain][arch][series] = self.package
+ else:
+ nest = NestedDict({f"{domain}.{arch}": {series: ""}})
+ nest[domain][arch][series] = self.package
+ setattr(self, name, nest)
+
+
+# data[domain][arch][series] = pkg_data
+# if tag_data.comp:
+# data[tag_datadomain][arch][series][comp_tag] = pkg_data
+# self.generate_pkg("pkg", self.raw_data.model)
+# self.generate_pkg("tokenizer_pkg", self.raw_data.tokenizer)
+# framework: dict[str,FrameworkBundle]
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index c36cd92..0e07255 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -3,6 +3,7 @@
from typing import Any, Callable
+from mir.framework import MIRNesting, MIRPackage
from mir.generate.transformers.raw_data import PrepareData
from mir.tag import MIRTag
@@ -29,8 +30,17 @@ def find_transformers_classes(self) -> None:
if config_data := self.extract_config_class_data(config_class):
if model_data := self.extract_model_class_data(model_class):
if prepared_data := PrepareData(**config_data, **model_data): # type:ignore
- mir_tag = MIRTag("info", prepared_data)
- self.db.add_tag(mir_tag)
+ mir_tag = MIRTag(prepared_data)
+ mir_package = MIRPackage()
+ mir_nest = MIRNesting(mir_tag)
+ mir_package(data=prepared_data.model)
+ mir_nest(mir_package, prepared_data)
+ if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
+ mir_package(data=prepared_data.tokenizer)
+ mir_nest(mir_package)
+ mir_package.add_framework(mir_nest.framework_data)
+ mir_nest(mir_package)
+ self.db.add_data(mir_nest, *mir_nest.loops)
def extract_config_class_data(self, config_class: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
"""Extracts information from config classes.\n
diff --git a/mir/maid.py b/mir/maid.py
index ef7ba78..5f0111d 100644
--- a/mir/maid.py
+++ b/mir/maid.py
@@ -8,13 +8,14 @@
from typing import Any, List, Optional
from mir import MIR_PATH_NAMED
+from mir.framework import MIRNesting
from mir.json_io import read_json_file, write_json_file
from mir.tag import MIRTag
class MIRDatabase:
- """Machine Intelligence Resource Database Object
- Database search and read/write operations"""
+ """Machine Intelligence Resource database object\n
+ Database query and read/write operations"""
def __init__(self, db: dict | None = None) -> None:
from chanfig import NestedDict
@@ -29,48 +30,24 @@ def __init__(self, db: dict | None = None) -> None:
DBUQ(error_log)
self.db = NestedDict()
- def add_tag(self, mir_tag: MIRTag):
- """Add or update entry to MIR Database
- :param prepared_data: An instance of PrepareData to convert into tags
- """
+ def add_data(self, mir_nest: MIRNesting, *args) -> None:
+ """Add entry to MIR Database\n
+ :param mir_tag: An instance of MIRTag to be added to the database"""
from chanfig import NestedDict
- library = mir_tag.pkg.split(".")[0]
- pkg = {library: (mir_tag.pkg,)}
- if hasattr(mir_tag.data, "tokenizer") and mir_tag.data.tokenizer:
- info = NestedDict({f"info.encoder.tokenizer.{mir_tag.series}": {mir_tag.tokenizer_pkg}})
- self._update_data(self.db, info)
- pkg = pkg | {"tokenizer": f"info.encoder.tokenizer.{mir_tag.series}"}
- if hasattr(mir_tag, "comp"):
- info = NestedDict({f"info.{mir_tag.arch}.{mir_tag.series}{mir_tag.comp}": pkg})
- else:
- info = NestedDict({f"info.{mir_tag.arch}.{mir_tag.series}": pkg})
- self._update_data(self.db, info)
-
+ for nested_tag in args:
+ self._include_data(self.db, getattr(mir_nest, nested_tag))
self.db = NestedDict(self.db)
- def _update_data(self, target, source):
- """Recursively merges `source` into `target` without overwriting nested dictionaries entirely."""
-
+ def _include_data(self, target: dict[str, Any], source: dict[str, Any]):
+ """Recursively merges `source` into `target` without overwriting nested dictionaries or their entries."""
for key, value in source.items():
- if isinstance(value, dict) and key in target and isinstance(target[key], dict):
- self._update_data(target[key], value)
+ if isinstance(value, dict) and key in target and isinstance(target[key], dict): # 递归 recurse
+ self._include_data(target[key], value)
else:
- # Update only if key doesn't exist or value is not a dict to avoid overwriting
if key not in target or not isinstance(target[key], dict):
target.setdefault(key, value)
- # Handle cases where source might have non-dict values that should update target's non-dict values
- for key in target:
- if key not in source and isinstance(target[key], dict):
- continue
- elif key not in source and not isinstance(target[key], dict):
- # If key exists in target but not in source and is not a dict, ensure it's preserved
- pass
- else:
- # Additional logic if needed for specific conditions
- pass
-
def write_to_disk(self, data: Optional[dict] = None) -> None: # pylint:disable=unused-argument
"""Save data to JSON file\n"""
diff --git a/mir/package.py b/mir/package.py
new file mode 100644
index 0000000..e69de29
diff --git a/mir/tag.py b/mir/tag.py
index 495040e..c31266f 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -2,7 +2,6 @@
#
from dataclasses import dataclass, field
-from typing import Callable
from mir.generate.transformers.raw_data import PrepareData
@@ -12,55 +11,38 @@ class MIRTag:
"""Represents a MIR tag associated with a specific domain and model data.\n
Attributes:\n
- domain: The domain of the MIR tag.
prepared_data: Object containing prepared model data.
arch: The architecture component of the MIR tag (generated).
series: The series component of the MIR tag (generated).
- pkg Package information associated with the MIR tag (generated).
- tokenizer_pkg Dependency package information associated with the MIR tag (generated).
+ comp The compatibility component of the MIR tag (generated, optional).
"""
- domain: str
- data: PrepareData
+ raw_data: PrepareData
arch: str = field(init=False)
series: str = field(init=False)
- pkg: str = field(default_factory=str)
- tokenizer_pkg: str = field(default_factory=str)
def __post_init__(self) -> None:
"""Initializes MIRTag instance, setting up database connection and generating package and MIR tag information."""
- from mir.maid import MIRDatabase
-
- self.mir_db = MIRDatabase()
- self.pkg = self.generate_pkg(pkg=self.data.model)
- if hasattr(self.data, "tokenizer") and self.data.tokenizer:
- self.tokenizer_pkg = self.generate_pkg(pkg=self.data.tokenizer) # type:ignore
self.generate_arch()
- self.generate_series_and_comp(repo_title=self.data.repo_path)
-
- def generate_pkg(self, pkg: Callable) -> str:
- """Generates package information for the MIR tag based on class.
- :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
-
- return f"{pkg.__module__}.{pkg.__name__}"
+ self.generate_series_and_comp(repo_title=self.raw_data.repo_path)
def generate_arch(self) -> None:
"""Generates the architecture part of the MIR tag based on prepared data.\n
:raises ValueError: If no suitable tag can be determined."""
from mir.generate.from_module import to_domain_tag
- library = self.pkg.split(".")[0]
- arch = to_domain_tag(library, **self.data.config_params)
+ library = self.raw_data.model.__module__.split(".")[0]
+ arch = to_domain_tag(library, **self.raw_data.config_params)
if not arch:
- if self.data.model_params:
- if arch := to_domain_tag(library, **self.data.model_params):
+ if self.raw_data.model_params:
+ if arch := to_domain_tag(library, **self.raw_data.model_params):
pass
raise ValueError(f"Unable to determine MIR prefix from {self}")
else:
raise ValueError(
f"Unrecognized model type, \
- no tag matched {self.data.name} \
- with {self.data.config_params} or {self.data.model_params}",
+ no tag matched {self.raw_data.name} \
+ with {self.raw_data.config_params} or {self.raw_data.model_params}",
)
self.arch = arch
diff --git a/tests/test_class_parent.py b/tests/old/test_class_parent.py
similarity index 100%
rename from tests/test_class_parent.py
rename to tests/old/test_class_parent.py
diff --git a/tests/test_deconstructors_root.py b/tests/old/test_deconstructors_root.py
similarity index 100%
rename from tests/test_deconstructors_root.py
rename to tests/old/test_deconstructors_root.py
diff --git a/tests/test_doc_parser.py b/tests/old/test_doc_parser.py
similarity index 100%
rename from tests/test_doc_parser.py
rename to tests/old/test_doc_parser.py
diff --git a/tests/test_find_docstring_run.py b/tests/old/test_find_docstring_run.py
similarity index 100%
rename from tests/test_find_docstring_run.py
rename to tests/old/test_find_docstring_run.py
diff --git a/tests/test_gather_diffusers_metadata.py b/tests/old/test_gather_diffusers_metadata.py
similarity index 100%
rename from tests/test_gather_diffusers_metadata.py
rename to tests/old/test_gather_diffusers_metadata.py
diff --git a/tests/test_json_io.py b/tests/old/test_json_io.py
similarity index 100%
rename from tests/test_json_io.py
rename to tests/old/test_json_io.py
diff --git a/tests/test_mir_db_create_restore.py b/tests/old/test_mir_db_create_restore.py
similarity index 100%
rename from tests/test_mir_db_create_restore.py
rename to tests/old/test_mir_db_create_restore.py
diff --git a/tests/test_mir_merge.py b/tests/old/test_mir_merge.py
similarity index 100%
rename from tests/test_mir_merge.py
rename to tests/old/test_mir_merge.py
diff --git a/tests/test_mir_search.py b/tests/old/test_mir_search.py
similarity index 100%
rename from tests/test_mir_search.py
rename to tests/old/test_mir_search.py
diff --git a/tests/test_mir_tagging.py b/tests/old/test_mir_tagging.py
similarity index 100%
rename from tests/test_mir_tagging.py
rename to tests/old/test_mir_tagging.py
diff --git a/tests/test_regex_constants.py b/tests/old/test_regex_constants.py
similarity index 100%
rename from tests/test_regex_constants.py
rename to tests/old/test_regex_constants.py
diff --git a/tests/test_resolve_code_names.py b/tests/old/test_resolve_code_names.py
similarity index 100%
rename from tests/test_resolve_code_names.py
rename to tests/old/test_resolve_code_names.py
diff --git a/tests/test_seek_class.py b/tests/old/test_seek_class.py
similarity index 100%
rename from tests/test_seek_class.py
rename to tests/old/test_seek_class.py
diff --git a/tests/test_task.py b/tests/old/test_task.py
similarity index 100%
rename from tests/test_task.py
rename to tests/old/test_task.py
diff --git a/tests/test_taskanalyzer.py b/tests/old/test_taskanalyzer.py
similarity index 100%
rename from tests/test_taskanalyzer.py
rename to tests/old/test_taskanalyzer.py
diff --git a/tests/test_mir_generate.py b/tests/test_mir_generate.py
new file mode 100644
index 0000000..8ceacd0
--- /dev/null
+++ b/tests/test_mir_generate.py
@@ -0,0 +1,21 @@
+def test_info_key_exists_and_library_is_not_nested():
+ from mir import Mir
+
+ print(Mir.info.cnn.yolos)
+ result = Mir.info.cnn.yolos["transformers"] # should not throw
+ assert result == "ops.cnn.yolos"
+
+
+def test_ops_key_exists_and_library_is_not_tested():
+ from mir import Mir
+
+ print(Mir.ops.cnn.yolos)
+ result = Mir.ops.cnn.yolos["transformers"] # should not throw
+ assert result["model"] == "transformers.models.yolos.modeling_yolos.YolosModel"
+ expected_tasks = [
+ "YolosPreTrainedModel",
+ "YolosForObjectDetection",
+ "YolosImageProcessorFast",
+ "YolosImageProcessor",
+ ]
+ assert all(task in result["tasks"] for task in expected_tasks)
From 474426bc892ccf71c06dfb793a9fd755b63cc3ca Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sat, 17 Jan 2026 17:59:21 -0500
Subject: [PATCH 09/16] ~patched_tokenizers
---
mir/__init__.py | 4 -
mir/__main__.py | 21 ++++
mir/framework.py | 133 +++++++++++++-------------
mir/generate/transformers/harvest.py | 9 +-
mir/generate/transformers/raw_data.py | 1 +
tests/test_mir_generate.py | 9 +-
6 files changed, 103 insertions(+), 74 deletions(-)
create mode 100644 mir/__main__.py
diff --git a/mir/__init__.py b/mir/__init__.py
index 4e83592..ba063fb 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -22,8 +22,4 @@
IGNORE = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["ignore"]
-tag = lambda path: path.rsplit(".", 1)
-run = lambda parts: getattr(import_module(parts[0]), parts[1])
-
-
Mir = HarvestClasses().db.db
diff --git a/mir/__main__.py b/mir/__main__.py
new file mode 100644
index 0000000..4e892d4
--- /dev/null
+++ b/mir/__main__.py
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from typing import Callable
+from importlib import import_module
+
+tag = lambda path: path.rsplit(".", 1) # noqa
+run = lambda parts: getattr(import_module(parts[0]), parts[1])
+
+
+def get_attribute_chain(root_object: Callable, attribute_path: str):
+ """Retrieve a nested attribute from *root_object* using a dot‑separated string.\n
+ :param root_object : The object from which the attribute chain will be resolved.
+ :param attribute_path : Dot‑separated attribute names, e.g. ``"ops.cnn.yolos"``.
+ :returns: The final attribute value reached by following the chain.
+ :raises: AttributeError If any part of the chain does not exist on the current object."""
+
+ current = root_object
+ for part in attribute_path.split("."):
+ current = getattr(current, part)
+ return current
diff --git a/mir/framework.py b/mir/framework.py
index 077f7ca..aa0a440 100644
--- a/mir/framework.py
+++ b/mir/framework.py
@@ -10,36 +10,29 @@
@dataclass
class MIRPackage:
data: Callable | str | dict[str, str]
- library: str = field(init=False, default_factory=str)
- package: dict[str, dict[str, str]] = field(init=False, default_factory=dict[str, dict[str, str]])
- framework: dict[str, dict[str, str]] = field(init=False, default_factory=dict[str, dict[str, str]])
+ package: dict[str, str] = field(init=False, default_factory=dict[str, str])
- def __init__(self):
- pass
-
- def __call__(self, data: Callable | str | dict[str, str]):
+ def __init__(self, data: Callable | str | dict[str, str]):
+ self.package = {}
self.data = data
- if isinstance(self.data, Callable):
+ if not isinstance(self.data, dict):
self.generate_package()
def generate_package(self) -> None:
"""Generates package information for the MIR tag based on class.
:param pkg: A class object (model, tokenizer, etc) to build a tag from"""
self.domain = "ops"
- module_path = self.data.__module__
- self.library = module_path.split(".")[0]
- self.package: dict[str, dict[str, str]] = {self.library: {"model": f"{module_path}.{self.data.__name__}"}}
+ model = f"{self.data.__module__}.{self.data.__name__}"
+ self.package: dict[str, str] = {"model": model}
def add_framework(self, framework_data) -> None:
self.domain = "info"
- self.framework = {self.library: framework_data}
+ self.package = framework_data
class MIRNesting:
"""Build tag components from the extracted data\n
:param mir_tag: An instance of MIR tag with the necessary information
- :param name: Identification string to store data underneath
- :param mir_package: Instance of MIRPackage to store inside the nested dict
:param prepared_data: Instance of PrepareData to attribute the final information
:returns: The final, assembled MIR tag"""
@@ -49,66 +42,78 @@ class MIRNesting:
framework: dict[str, str] = field(init=False)
tokenizer: str | None = field(default_factory=str)
- def __init__(self, mir_tag: MIRTag) -> None:
+ def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData) -> None:
+ """\nInitialize the framework with MIR tag and prepared data.\n
+ :param mir_tag : The MIR tag instance.
+ :param prepared_data : The prepared data for processing."""
self.mir_tag = mir_tag
+
+ self.prepared_data = prepared_data
self.loops = []
self.framework_data = {}
- def __call__(self, mir_package: MIRPackage, prepared_data: PrepareData | None = None):
- if hasattr(mir_package, "library"):
- self.library = mir_package.library
- if prepared_data:
- self.framework_data.setdefault("repo", prepared_data.repo_path)
- if hasattr(mir_package, "tokenizer"):
- name = "tokenizer"
- self.package = mir_package.package
- self.nest_data(
- name=name,
- domain=mir_package.domain,
- arch="encoder",
- series="tokenizer",
- comp=self.mir_tag.series,
- )
- self.framework_data.setdefault("tokenizer", f"{mir_package.domain}.encoder.tokenizer.{self.mir_tag.series}")
- else:
- data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
+ def __call__(self, mir_package: MIRPackage) -> None:
+ """Dispatches a MIRPackage to the appropriate handler based on its domain.
+ :param mir_package: An instance of MIRPackage with the requisite data to tag"""
+
+ if (mir_package.domain == "ops" and
+ hasattr(self.prepared_data, "tokenizer") and
+ self.prepared_data.tokenizer and self.loops== ['model']):
+ self._process("tokenizer", mir_package)
+ elif mir_package.domain == "ops":
+ self._process("model", mir_package)
+ elif mir_package.domain == "info":
+ self._process("framework", mir_package)
+
+ def _process(self, name: str, mir_package: MIRPackage) -> None:
+ """Common routine for handling a package: store tag data, nest the package,
+ and record the name of the newly-created attribute.\n
+ :param name: Identification string to store data underneath
+ :param mir_package: An instance of MIRPackage with the requisite data"""
+
+ is_framework = name == "framework"
+ is_model = name == "model"
+
+
+ if is_framework:
+ package_data = {self.prepared_data.library: mir_package.package}
+ tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
+ if comp := getattr(self.mir_tag, "comp", None):
+ tag_data += comp
+ self.framework_data.setdefault("repo", self.prepared_data.repo_path)
+ elif is_model:
+ package_data = {self.prepared_data.library: mir_package.package}
+ if hasattr(self.prepared_data, "tasks") and self.prepared_data.tasks:
+ package_data[self.prepared_data.library].setdefault("tasks", self.prepared_data.tasks)
+ tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
if comp := getattr(self.mir_tag, "comp", None):
- self.framework_data.setdefault("model", data + comp)
- else:
- self.framework_data.setdefault("model", data)
-
- if hasattr(mir_package, "framework"):
- name = "framework"
- self.package = mir_package.framework
- else:
- name = "model"
- self.package = mir_package.package
- if hasattr(prepared_data, "tasks") and prepared_data.tasks:
- self.package[mir_package.library].setdefault("tasks", prepared_data.tasks)
- self.nest_data(
- name=name,
- domain=mir_package.domain,
- arch=self.mir_tag.arch,
- series=self.mir_tag.series,
- comp=comp,
- )
+ tag_data += comp
+ self.framework_data.setdefault(name, tag_data)
+ else: # tokenizer case
+ package_data = {self.prepared_data.library: mir_package.package}
+ tag_data = f"{mir_package.domain}.encoder.tokenizer.{self.mir_tag.series}"
+ self.framework_data.setdefault(name, tag_data)
+
+ self.nest_data(name=name, tag_data=tag_data, package_data=package_data)
self.loops.append(name)
- def nest_data(self, name: str, domain: str, arch: str, series: str, comp: str | None = None) -> None:
+ def nest_data(self, name: str, tag_data: str, package_data: dict) -> None:
+ """Nest data into a hierarchical attribute structure.\n
+ :param name: Attribute name to store the nested data
+ :param tag_data: Dotted path string for nesting
+ :param package_data: Data to be stored in the nested structure"""
+
from chanfig import NestedDict
- if comp:
+ tag_parts = tuple(x for x in tag_data.split("."))
+
+ if len(tag_parts) ==4:
+ domain, arch, series, comp = tag_parts
nest = NestedDict({f"{domain}.{arch}.{series}": {comp: ""}})
- nest[domain][arch][series] = self.package
+ nest[domain][arch][series][comp] = package_data
else:
+ domain, arch, series = tag_parts
nest = NestedDict({f"{domain}.{arch}": {series: ""}})
- nest[domain][arch][series] = self.package
- setattr(self, name, nest)
-
+ nest[domain][arch][series] = package_data
-# data[domain][arch][series] = pkg_data
-# if tag_data.comp:
-# data[tag_datadomain][arch][series][comp_tag] = pkg_data
-# self.generate_pkg("pkg", self.raw_data.model)
-# self.generate_pkg("tokenizer_pkg", self.raw_data.tokenizer)
-# framework: dict[str,FrameworkBundle]
+ setattr(self, name, nest)
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index 0e07255..235bf76 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -31,12 +31,11 @@ def find_transformers_classes(self) -> None:
if model_data := self.extract_model_class_data(model_class):
if prepared_data := PrepareData(**config_data, **model_data): # type:ignore
mir_tag = MIRTag(prepared_data)
- mir_package = MIRPackage()
- mir_nest = MIRNesting(mir_tag)
- mir_package(data=prepared_data.model)
- mir_nest(mir_package, prepared_data)
+ mir_nest = MIRNesting(mir_tag, prepared_data)
+ mir_package = MIRPackage(data=prepared_data.model)
+ mir_nest(mir_package)
if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
- mir_package(data=prepared_data.tokenizer)
+ mir_package = MIRPackage(data=prepared_data.tokenizer)
mir_nest(mir_package)
mir_package.add_framework(mir_nest.framework_data)
mir_nest(mir_package)
diff --git a/mir/generate/transformers/raw_data.py b/mir/generate/transformers/raw_data.py
index fdbe5dd..0664c45 100644
--- a/mir/generate/transformers/raw_data.py
+++ b/mir/generate/transformers/raw_data.py
@@ -27,6 +27,7 @@ def __post_init__(self) -> None:
self.tokenizer: tuple[type[Any] | None, type[Any] | None] = tokenizer
if internal_name := REVERSE_MAP.get(self.config):
self.internal_name = internal_name
+ self.library = self.model.__module__.split(".")[0]
self.model_to_tasks()
def model_to_tasks(self) -> None:
diff --git a/tests/test_mir_generate.py b/tests/test_mir_generate.py
index 8ceacd0..211790d 100644
--- a/tests/test_mir_generate.py
+++ b/tests/test_mir_generate.py
@@ -3,7 +3,7 @@ def test_info_key_exists_and_library_is_not_nested():
print(Mir.info.cnn.yolos)
result = Mir.info.cnn.yolos["transformers"] # should not throw
- assert result == "ops.cnn.yolos"
+ assert result == {"repo": "hustvl/yolos-base", "model": "ops.cnn.yolos"}
def test_ops_key_exists_and_library_is_not_tested():
@@ -19,3 +19,10 @@ def test_ops_key_exists_and_library_is_not_tested():
"YolosImageProcessor",
]
assert all(task in result["tasks"] for task in expected_tasks)
+
+
+def test_ops_tokenizer_created():
+ from mir import Mir
+
+ result = Mir.ops.encoder.tokenizer.zamba2['transformers']
+ assert result == {"model": "transformers.models.llama.tokenization_llama.LlamaTokenizer"}
From bdf37ec419dfd034a854e6e928b0888d01b63bc5 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sat, 17 Jan 2026 18:07:21 -0500
Subject: [PATCH 10/16] ~more elegant solution
---
mir/framework.py | 2 ++
mir/generate/transformers/harvest.py | 37 +++++++++++++++-------------
2 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/mir/framework.py b/mir/framework.py
index aa0a440..d6e402b 100644
--- a/mir/framework.py
+++ b/mir/framework.py
@@ -17,6 +17,8 @@ def __init__(self, data: Callable | str | dict[str, str]):
self.data = data
if not isinstance(self.data, dict):
self.generate_package()
+ else:
+ self.add_framework(self.data)
def generate_package(self) -> None:
"""Generates package information for the MIR tag based on class.
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index 235bf76..d1fb779 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -22,24 +22,27 @@ def find_transformers_classes(self) -> None:
:return: List of PrepareData entries representing the transformer classes."""
from mir.generate.transformers import AUTO_MAP
- model_data = []
- for pair_map in AUTO_MAP.items():
- config_class, model_class = pair_map # type:ignore
+ for config_class, model_class in AUTO_MAP.items():
if isinstance(model_class, tuple):
- model_class: Callable = model_class[0]
- if config_data := self.extract_config_class_data(config_class):
- if model_data := self.extract_model_class_data(model_class):
- if prepared_data := PrepareData(**config_data, **model_data): # type:ignore
- mir_tag = MIRTag(prepared_data)
- mir_nest = MIRNesting(mir_tag, prepared_data)
- mir_package = MIRPackage(data=prepared_data.model)
- mir_nest(mir_package)
- if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
- mir_package = MIRPackage(data=prepared_data.tokenizer)
- mir_nest(mir_package)
- mir_package.add_framework(mir_nest.framework_data)
- mir_nest(mir_package)
- self.db.add_data(mir_nest, *mir_nest.loops)
+ model_class = model_class[0]
+ if not (config_data := self.extract_config_class_data(config_class)):
+ continue
+ if not (model_data := self.extract_model_class_data(model_class)):
+ continue
+ if not (prepared_data := PrepareData(**config_data, **model_data)): # type:ignore
+ continue
+
+ mir_tag = MIRTag(prepared_data)
+ mir_nest = MIRNesting(mir_tag, prepared_data)
+ packages = [MIRPackage(data=prepared_data.model)]
+ if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
+ packages.append(MIRPackage(data=prepared_data.tokenizer))
+ packages.append(MIRPackage(data=mir_nest.framework_data))
+ for pkg in packages:
+ mir_nest(pkg)
+
+
+ self.db.add_data(mir_nest, *mir_nest.loops)
def extract_config_class_data(self, config_class: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
"""Extracts information from config classes.\n
From 714f59d0cf872fe3c75c8167fbbcbf259de2ddcb Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sat, 17 Jan 2026 23:18:43 -0500
Subject: [PATCH 11/16] ~noodle
---
mir/data/exclusions.json | 3 +-
mir/data/migrations.json | 12 ++--
mir/framework.py | 12 ++--
mir/generate/diffusers/__init__.py | 26 +------
mir/generate/diffusers/doc_parse.py | 92 +++++++++---------------
mir/generate/diffusers/harvest.py | 103 ++++++++++++++++++++++++++
mir/generate/diffusers/index.py | 27 ++-----
mir/generate/diffusers/raw_data.py | 59 +++++++++++++++
mir/generate/tasks.py | 104 ++++-----------------------
mir/generate/transformers/harvest.py | 8 +--
mir/tag.py | 61 ++++++++++++----
tests/subclasses_test.py | 0
12 files changed, 279 insertions(+), 228 deletions(-)
create mode 100644 mir/generate/diffusers/harvest.py
create mode 100644 mir/generate/diffusers/raw_data.py
create mode 100644 tests/subclasses_test.py
diff --git a/mir/data/exclusions.json b/mir/data/exclusions.json
index e386bb2..e35cfed 100644
--- a/mir/data/exclusions.json
+++ b/mir/data/exclusions.json
@@ -27,6 +27,7 @@
"text_to_video_synthesis",
"unclip",
"unidiffuser",
- "controlnet_hunyuandit"
+ "controlnet_hunyuandit",
+ "pipeline_stable_diffusion_xl_inpaint"
]
}
\ No newline at end of file
diff --git a/mir/data/migrations.json b/mir/data/migrations.json
index 5bc9929..755664f 100644
--- a/mir/data/migrations.json
+++ b/mir/data/migrations.json
@@ -46,13 +46,9 @@
"TimmWrapperConfig": "timm/resnet18.a1_in1k",
"VisionTextDualEncoderConfig": "hakuhodo-tech/japanese-clip-vit-h-14-bert-wider"
},
- "module": {
- "blip_diffusion": "blip_diffusion",
- "cogvideo": "cogvideox",
- "cogview3": "cogview3plus",
- "deepfloyd_if": "if",
- "cosmos": "cosmos2_text2image",
- "visualcloze": "visualcloze_generation",
- "marigold": "marigold_depth"
+ "migrated_pipes": {
+ "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium",
+ "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers",
+ "ChromaPipeline": "lodestones/Chroma"
}
}
\ No newline at end of file
diff --git a/mir/framework.py b/mir/framework.py
index d6e402b..fd5d2d4 100644
--- a/mir/framework.py
+++ b/mir/framework.py
@@ -3,6 +3,7 @@
from typing import Any, Callable
from dataclasses import dataclass, field
+from mir.generate.diffusers.raw_data import DPrepareData
from mir.generate.transformers.raw_data import PrepareData
from mir.tag import MIRTag
@@ -12,7 +13,7 @@ class MIRPackage:
data: Callable | str | dict[str, str]
package: dict[str, str] = field(init=False, default_factory=dict[str, str])
- def __init__(self, data: Callable | str | dict[str, str]):
+ def __init__(self, data: Callable | str | dict[str, str] | dict[str, Any]):
self.package = {}
self.data = data
if not isinstance(self.data, dict):
@@ -44,7 +45,7 @@ class MIRNesting:
framework: dict[str, str] = field(init=False)
tokenizer: str | None = field(default_factory=str)
- def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData) -> None:
+ def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData | DPrepareData) -> None:
"""\nInitialize the framework with MIR tag and prepared data.\n
:param mir_tag : The MIR tag instance.
:param prepared_data : The prepared data for processing."""
@@ -58,9 +59,7 @@ def __call__(self, mir_package: MIRPackage) -> None:
"""Dispatches a MIRPackage to the appropriate handler based on its domain.
:param mir_package: An instance of MIRPackage with the requisite data to tag"""
- if (mir_package.domain == "ops" and
- hasattr(self.prepared_data, "tokenizer") and
- self.prepared_data.tokenizer and self.loops== ['model']):
+ if mir_package.domain == "ops" and hasattr(self.prepared_data, "tokenizer") and self.prepared_data.tokenizer and self.loops == ["model"]: # type: ignore
self._process("tokenizer", mir_package)
elif mir_package.domain == "ops":
self._process("model", mir_package)
@@ -76,7 +75,6 @@ def _process(self, name: str, mir_package: MIRPackage) -> None:
is_framework = name == "framework"
is_model = name == "model"
-
if is_framework:
package_data = {self.prepared_data.library: mir_package.package}
tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
@@ -109,7 +107,7 @@ def nest_data(self, name: str, tag_data: str, package_data: dict) -> None:
tag_parts = tuple(x for x in tag_data.split("."))
- if len(tag_parts) ==4:
+ if len(tag_parts) == 4:
domain, arch, series, comp = tag_parts
nest = NestedDict({f"{domain}.{arch}.{series}": {comp: ""}})
nest[domain][arch][series][comp] = package_data
diff --git a/mir/generate/diffusers/__init__.py b/mir/generate/diffusers/__init__.py
index 2f50daa..c19bcec 100644
--- a/mir/generate/diffusers/__init__.py
+++ b/mir/generate/diffusers/__init__.py
@@ -1,31 +1,7 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-
-from dataclasses import dataclass
from typing import Callable
+from dataclasses import dataclass, field
from diffusers.pipelines import _import_structure as IMPORT_STRUCTURE
from diffusers.pipelines.auto_pipeline import SUPPORTED_TASKS_MAPPINGS, _get_task_class as GET_TASK_CLASS
-
-
-@dataclass
-class DocStringEntry:
- """Represents a structured entry of package name, file name, and docstring."""
-
- package_name: str
- doc_string: str
- file_name: str
- pipe_module: Callable
-
-
-class DocParseData:
- pipe_class: str
- pipe_repo: str
- staged_class: str | None = None
- staged_repo: str | None = None
-
- def __init__(self, pipe_class: str, pipe_repo: str, staged_class: str | None = None, staged_repo: str | None = None):
- self.pipe_class = pipe_class
- self.pipe_repo = pipe_repo
- self.staged_class = staged_class
- self.staged_repo = staged_repo
diff --git a/mir/generate/diffusers/doc_parse.py b/mir/generate/diffusers/doc_parse.py
index 18e091b..0e70ba3 100644
--- a/mir/generate/diffusers/doc_parse.py
+++ b/mir/generate/diffusers/doc_parse.py
@@ -1,11 +1,9 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import List, Optional, Tuple
-
+from typing import List, Optional, Callable
from pydantic import BaseModel, field_validator
from mir import NFO
-from mir.generate.diffusers import DocParseData
from mir.data import PIPE_MARKERS
@@ -40,84 +38,69 @@ def validate_repo_path(repo_path: Optional[str], segment: str) -> Optional[str]:
return None
return repo_path
- @staticmethod
- def validate_pipe_class(pipe_class: Optional[str]) -> bool:
- """Validate that a pipe class name is present.\n
- :param pipe_class: Pipe class name to validate
- :returns: True if class name is valid, False otherwise
- """
- return pipe_class is not None and pipe_class.strip() != ""
-
class DocStringParser(BaseModel):
doc_string: str
+ model: Callable
@field_validator("doc_string")
def normalize_doc(cls, docs: str) -> str:
return DocStringValidator.normalize_doc_string(docs)
- def doc_match(self, prefix_set: List[str] | None = None):
- if prefix_set is None:
- prefix_set = PIPE_MARKERS["pipe_variables"]
- candidate = None
- staged = None
- prior_candidate = ""
- for prefix in prefix_set:
- candidate = self.doc_string.partition(prefix)[2]
- prior_candidate = self.doc_string.partition(prefix)[0]
- if candidate:
- staged = candidate if any(call_method in candidate for call_method in PIPE_MARKERS["staged_call_methods"]) else None
- break
-
- return candidate, prior_candidate, staged
+ def __init__(self, doc_string, model) -> None:
+ super().__init__(doc_string=doc_string, model=model)
+ self.__post_init__()
- def parse(self) -> DocParseData | None:
- candidate, prior_candidate, staged = self.doc_match(PIPE_MARKERS["pipe_prefixes"])
+ def __post_init__(self) -> dict[str, str] | None:
+ candidate, prior_candidate, staged = self.doc_match(PIPE_MARKERS["pipe_variables"])
if candidate:
- pipe_class, pipe_repo = self._extract_class_and_repo(
+ pipe_repo = self._extract_class_and_repo(
segment=candidate,
- call_methods=PIPE_MARKERS["call_types"],
+ call_methods=PIPE_MARKERS["call_methods"],
prior_text=prior_candidate,
)
motion_adapter = "motion_adapter" in candidate or "adapter" in candidate
if motion_adapter and pipe_repo:
- staged, prior_candidate, _ = self.doc_match(PIPE_MARKERS["pipe_prefixes"][2:]) # skip the adapter statements
+ staged, prior_candidate, _ = self.doc_match(PIPE_MARKERS["pipe_variables"][2:]) # skip the adapter statements
- staged_class, staged_repo = (
+ staged_repo = (
self._extract_class_and_repo(
segment=staged,
- call_methods=PIPE_MARKERS["staged_call_types"] if not motion_adapter else PIPE_MARKERS["call_types"],
+ call_methods=PIPE_MARKERS["staged_call_methods"] if not motion_adapter else PIPE_MARKERS["call_methods"],
prior_text=prior_candidate,
- prior_class=pipe_class,
)
if staged
- else (None, None)
+ else None
)
- if motion_adapter and pipe_class and staged_class is not None:
- pipe_class = staged_class
- staged_repo = None
- staged_class = None
- if DocStringValidator.validate_pipe_class(pipe_class):
- # dbuq(f"class :{pipe_class}, repo : {pipe_repo}, staged_class: {staged_class}, staged_repo:{staged_repo} \n")
- return DocParseData(pipe_class=pipe_class, pipe_repo=pipe_repo, staged_class=staged_class, staged_repo=staged_repo)
+ self.pipe_repo = pipe_repo
+ self.staged_repo = staged_repo
+
+ def doc_match(self, prefix_set: List[str] | None = None):
+ if prefix_set is None:
+ prefix_set = PIPE_MARKERS["pipe_variables"]
+ candidate = None
+ staged = None
+ prior_candidate = ""
+ for prefix in prefix_set:
+ candidate = self.doc_string.partition(prefix)[2]
+ prior_candidate = self.doc_string.partition(prefix)[0]
+ if candidate:
+ staged = candidate if any(call_method in candidate for call_method in PIPE_MARKERS["staged_call_methods"]) else None
+ break
+
+ return candidate, prior_candidate, staged
def _extract_class_and_repo(
self,
segment: str,
call_methods: List[str],
prior_text: str,
- prior_class: Optional[str] = None,
- ) -> Tuple[Optional[str], Optional[str]]:
- pipe_class = None
+ ) -> str | None:
pipe_repo = None
for method_name in call_methods:
if method_name in segment:
- pipe_class = segment.partition(method_name)[0].strip().split("= ")[-1].split(".")[-1]
- if prior_class == pipe_class and prior_text.split(method_name)[-1].strip().replace(")", ""):
- pipe_class = prior_text.partition(method_name)[0].strip().split("= ")[-1]
- repo_segment = segment.partition(method_name)[2].partition(")")[0]
- else:
+ if not (repo_segment := segment.partition(method_name)[2].partition(")")[0]):
repo_segment = segment.partition(method_name)[2].partition(")")[0]
pipe_repo = repo_segment.replace("...", "").partition('",')[0].strip('" ')
if not DocStringValidator.is_valid_repo_path(pipe_repo):
@@ -126,11 +109,11 @@ def _extract_class_and_repo(
pipe_repo = self._resolve_variable(reference, prior_text)
break # Not empty!! 确保解析的路径不是空的!!
pipe_repo = DocStringValidator.validate_repo_path(pipe_repo, segment)
- return pipe_class, pipe_repo
+ return pipe_repo
- return pipe_class, pipe_repo
+ return pipe_repo
- def _resolve_variable(self, reference: str, prior_text: str) -> Optional[str]:
+ def _resolve_variable(self, reference: str, prior_text: str) -> str | None:
"""Try to find the variable from other lines / 尝试从其他行中找到它(例如,多行定义)"""
var_name = reference
search = f"{var_name} ="
@@ -156,8 +139,3 @@ def _resolve_variable(self, reference: str, prior_text: str) -> Optional[str]:
NFO(f"Warning: {search} not found in docstring.")
return None
-
-
-def parse_docs(doc_string: str) -> DocParseData | None:
- parser = DocStringParser(doc_string=doc_string)
- return parser.parse()
diff --git a/mir/generate/diffusers/harvest.py b/mir/generate/diffusers/harvest.py
new file mode 100644
index 0000000..6ed0d9b
--- /dev/null
+++ b/mir/generate/diffusers/harvest.py
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from importlib import import_module
+from pkgutil import walk_packages
+from inspect import getmro
+
+from mir.framework import MIRNesting
+from mir.generate.diffusers.raw_data import DPrepareData
+from mir.tag import MIRTag
+
+
+class HarvestClasses:
+ def __init__(self) -> None:
+ self.parsed_docs = []
+ pass
+
+ def find_diffusers_docstrings(self) -> None:
+ """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
+ :return: Docstrings for common diffusers models"""
+
+ self.extract_model_data()
+
+ def extract_model_data(self):
+ from mir.generate.diffusers import EXCLUSIONS
+ from mir.generate.tasks import TaskAnalyzer
+
+ subclasses = self.subclasses_of("diffusers", "DiffusionPipeline")
+ for path, class_obj in subclasses.items():
+ if path.rsplit(".", 1)[-1] in EXCLUSIONS["exclusion_list"].get("model_path", "."):
+ continue
+ base_path = path.rsplit(".", 1)[0]
+ model_path = import_module(base_path)
+ if doc_string := getattr(model_path, "EXAMPLE_DOC_STRING", None):
+ prepared_data = DPrepareData(doc_string=doc_string, model=class_obj, model_path=base_path)
+ mir_tag = MIRTag(prepared_data)
+ task_analysis = TaskAnalyzer()
+ mir_nest = MIRNesting(mir_tag, prepared_data)
+
+ def subclasses_of(self, package_name: str, base_class_name: str):
+ """
+ Return a dict mapping `.` → class object
+ for every class in `package_name` that subclasses a class named
+ `base_class_name`.
+
+ The implementation is intentionally defensive: it avoids
+ triggering `__getattr__` on lazy‑loaded submodules that might
+ raise a `RuntimeError`. Instead of `inspect.getmembers`, it
+ iterates over the module's `__dict__` which contains only
+ attributes that have already been imported.
+ """
+
+ results = {}
+ root_pkg = import_module(package_name)
+ for finder, mod_name, is_pkg in walk_packages(root_pkg.__path__, root_pkg.__name__ + "."):
+ try:
+ module = import_module(mod_name)
+ except (ImportError, ModuleNotFoundError, RuntimeError):
+ continue
+
+ # Iterate over all *already* imported members in the module
+ for name, obj in module.__dict__.items():
+ if not isinstance(obj, type):
+ continue
+ # Ensure the class is defined in this module, not imported
+ if obj.__module__ != mod_name:
+ continue
+ try:
+ bases = getmro(obj)[1:] # skip the class itself
+ except ValueError:
+ continue
+ for base in bases:
+ if base.__name__ == base_class_name:
+ fqcn = f"{mod_name}.{name}"
+ results[fqcn] = obj
+ break
+
+ return results
+
+ # def extract_model_data(self,pipe_name, file_name: str) -> dict | None:
+ # migrated_pipes = MIGRATIONS["migrated_pipes"]
+ # pkg_path = f"diffusers.pipelines.{pipe_name}.{file_name}"
+ # pipe_file: Callable = import_object_named(file_name, pkg_path) or import_module(pkg_path)
+ # if pipe_file and (doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None)): #where pipe class and repo are
+ # docstrings= DocStringEntry(package_name=pipe_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
+ # DocStringParser(doc_string=docstrings.doc_string)
+ # self.parsed_docs.pipe_repo = migrated_pipes.get(self.parsed_docs.pipe_class, self.parsed_docs.pipe_repo)
+ # model = import_object_named(parsed_data.pipe_class, docstrings.pipe_module.__name__)
+ # model_data = show_init_fields_for(model,"diffusers")
+ # return {"model_params": model_data}
+
+
+# for pipe_name in IMPORT_STRUCTURE.keys():
+# if pipe_name not in exclusion_list and (import_name := getattr(diffusers_pipelines, str(pipe_name))):
+# file_specific = uncommon_naming.get(pipe_name, pipe_name)
+# file_names:list[str] = [getattr(import_name, "_import_structure", {})] or [f"pipeline_{file_specific}"]
+# for file_name in file_names:
+# if not file_name in exclusion_list or not (model_data := self.extract_model_data(pipe_name, file_name)):
+# continue
+# if not (prepared_data := PrepareData( **model_data)):
+# continue
+# else:
+# continue
diff --git a/mir/generate/diffusers/index.py b/mir/generate/diffusers/index.py
index 06628e8..852fc24 100644
--- a/mir/generate/diffusers/index.py
+++ b/mir/generate/diffusers/index.py
@@ -7,11 +7,9 @@
from mir import DBUQ, NFO
from mir.data import EXCLUSIONS
-from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS, DocParseData, DocStringEntry
-from mir.generate.diffusers.doc_parse import parse_docs
+from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS
from mir.generate.from_module import import_object_named, show_init_fields_for, to_domain_tag
from mir.generate.indexers import migrations
-from mir.tag import tag_model_from_repo
def retrieve_diffusers_docstrings(
@@ -128,25 +126,6 @@ def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
continue
-def show_diffusers_tasks(code_name: str, class_name: str | None = None) -> list[str]:
- """Return Diffusers task pipes based on package-specific query\n
- :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
- :param code_name: To find task pipes from a Transformers class pipe, defaults to None
- :return: A list of alternate class pipelines derived from the specified class"""
-
- alt_tasks = set()
- for task_map in SUPPORTED_TASKS_MAPPINGS:
- task_class = GET_TASK_CLASS(task_map, class_name, False)
- if task_class:
- alt_tasks.add(task_class.__name__)
- DBUQ(task_class)
- for model_code, pipe_class_obj in task_map.items():
- if code_name in model_code:
- alt_tasks.add(pipe_class_obj.__name__)
-
- return list(alt_tasks)
-
-
def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
"""Generate diffusion model data for MIR index\n
:return: Dictionary ready to be applied to MIR data fields
@@ -160,7 +139,9 @@ def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
"HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
"ChromaPipeline": "lodestones/Chroma",
}
-
+ for class_name, swap_repo in special_classes.items():
+ if parsed_data.pipe_class == class_name:
+ parsed_data.pipe_repo = swap_repo
extracted_docstrings = find_diffusers_docstrings()
model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
diff --git a/mir/generate/diffusers/raw_data.py b/mir/generate/diffusers/raw_data.py
new file mode 100644
index 0000000..3e37836
--- /dev/null
+++ b/mir/generate/diffusers/raw_data.py
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from dataclasses import dataclass, field
+from typing import Callable, get_type_hints
+
+
+@dataclass
+class DPrepareData:
+ name: str
+ doc_string: str
+ model: Callable
+ model_path: str
+ repo_path: str = field(init=False, default_factory=str)
+ model_name: str = field(init=False, default_factory=str)
+ staged_repo: str | None = field(init=False, default=None)
+ tasks: list[str] = field(init=False, default_factory=lambda: [""])
+
+ def __init__(self, **kwargs) -> None:
+ self.__dict__.update(kwargs)
+ self.__post_init__()
+
+ def __post_init__(self) -> None:
+ from mir.data import MIGRATIONS
+ from mir.generate.diffusers.doc_parse import DocStringParser
+ from mir.generate.from_module import show_init_fields_for
+
+ self.model_name = self.model.__name__
+ self.library = self.model.__module__.split(".", 1)[0]
+ self.model_params = show_init_fields_for(self.model, "diffusers")
+ self.type_params = get_type_hints(self.model.__init__)
+ doc_parser = DocStringParser(self.doc_string, self.model)
+ if repo_path := MIGRATIONS["migrated_pipes"].get(self.model.__name__, False):
+ self.repo_path = repo_path
+ else:
+ if repo_path := doc_parser.pipe_repo:
+ self.repo_path = repo_path
+ if staged_repo := doc_parser.staged_repo:
+ self.staged_repo = staged_repo
+
+ def show_diffusers_tasks(self) -> list[str]:
+ """Return Diffusers task pipes based on package-specific query\n
+ :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
+ :param code_name: To find task pipes from a Transformers class pipe, defaults to None
+ :return: A list of alternate class pipelines derived from the specified class"""
+ from mir.generate.diffusers import SUPPORTED_TASKS_MAPPINGS, GET_TASK_CLASS
+
+ alt_tasks = set()
+ internal_name = self.model_path.rsplit(".", 2)[-2]
+ for task_map in SUPPORTED_TASKS_MAPPINGS:
+ task_class = GET_TASK_CLASS(task_map, self.model.__name__, False)
+ if task_class:
+ alt_tasks.add(task_class.__name__)
+ for model_code, pipe_class_obj in task_map.items():
+ if internal_name in model_code:
+ alt_tasks.add(pipe_class_obj.__name__)
+
+ return list(alt_tasks)
diff --git a/mir/generate/tasks.py b/mir/generate/tasks.py
index 5da5834..b037bdf 100644
--- a/mir/generate/tasks.py
+++ b/mir/generate/tasks.py
@@ -3,12 +3,11 @@
from typing import Any, Callable, List, get_type_hints
-from mir.generate.from_module import get_internal_name_for, import_object_named
-from mir.generate.transformers.index import show_transformers_tasks
from mir.maid import MIRDatabase
from mir.generate.diffusers.index import show_diffusers_tasks
from mir.generate.diffusers.schedulers import tag_scheduler
from mir import DBUQ
+from mir.tag import MIRTag
flatten_map: List[Any] = lambda nested, unpack: [element for iterative in getattr(nested, unpack)() for element in iterative]
flatten_map.__annotations__ = {"nested": List[str], "unpack": str}
@@ -29,36 +28,7 @@ def __init__(self) -> None:
self.skip_types = ["int", "bool", "float", "Optional", "NoneType", "List", "UNet2DConditionModel"]
self.mflux_tasks = ["Image", "Redux", "Kontext", "Depth", "Fill", "ConceptAttention", "ControlNet", "CavTon", "IC-Edit"]
- async def detect_tasks(self, mir_db: MIRDatabase, field_name: str = "pkg") -> dict:
- """Detects and traces tasks MIR data\n
- :param mir_db:: An instance of MIRDatabase containing the database of information.
- :type mir_db: MIRDatabase
- :param field_name: The name of the field in compatibility data to process for task detection, defaults to "pkg".
- :type field_name: str, optional
- :return: A dictionary mapping series names to their respective compatibility and traced tasks.
- :rtype: dict"""
-
- data_tuple = []
- for series, compatibility_data in mir_db.db.items():
- if (
- series.startswith("info.") # formatting comment
- and not any(tag for tag in self.skip_series if series.startswith(tag))
- and not any(tag for tag in self.skip_classes if tag in series)
- ):
- for compatibility, field_data in compatibility_data.items():
- if field_data and field_data.get(field_name, {}).get("0"):
- tasks_for_class = {"tasks": []}
- for _, pkg_tree in field_data[field_name].items():
- detected_tasks = await self.trace_tasks(pkg_tree=pkg_tree)
- if detected_tasks:
- for task in detected_tasks:
- if task not in tasks_for_class["tasks"]:
- tasks_for_class["tasks"].append(task)
- data_tuple.append((*series.rsplit(".", 1), {compatibility: tasks_for_class}))
-
- return data_tuple
-
- async def detect_pipes(self, mir_db: MIRDatabase, field_name: str = "pkg") -> dict:
+ async def detect_pipes(self, mir_tag: MIRTag, model: Callable, type_params: dict) -> dict:
"""Detects and traces Pipes MIR data\n
:param mir_db:: An instance of MIRDatabase containing the database of information.
:type mir_db: MIRDatabase
@@ -68,29 +38,18 @@ async def detect_pipes(self, mir_db: MIRDatabase, field_name: str = "pkg") -> di
:rtype: dict"""
data_tuple = []
- for series, compatibility_data in mir_db.db.items():
- if (
- series.startswith("info.") # formatting comment
- and not any(series.startswith(tag) for tag in self.skip_series)
- and not any(tag for tag in self.skip_classes if tag in series)
- ):
- for compatibility, field_data in compatibility_data.items():
- if field_data and field_data.get(field_name, {}).get("0"):
- for _, pkg_tree in field_data[field_name].items():
- if pkg_tree and next(iter(pkg_tree)) == "diffusers":
- module_name = pkg_tree[next(iter(pkg_tree))]
- DBUQ(f"{module_name} pipe originator")
- class_obj = import_object_named(module_name, "diffusers")
- pipe_args = get_type_hints(class_obj.__init__)
- detected_pipe = await self.hyperlink_to_mir(pipe_args, series, mir_db)
- data_tuple.append((*series.rsplit(".", 1), {compatibility: detected_pipe}))
+ tasks = show_diffusers_tasks(code_name=model.__name__, class_name=model.__name__)
+ detected_pipe = await self.hyperlink_to_mir(type_params, mir_tag.series)
+ if hasattr(mir_tag, "comp") and mir_tag.comp:
+ data_tuple.append((*mir_tag.series.rsplit(".", 1), {mir_tag.comp: detected_pipe}))
+ else:
+ data_tuple.append(({mir_tag.series: detected_pipe}))
return data_tuple
- async def hyperlink_to_mir(self, pipe_args: dict, series: str, mir_db: MIRDatabase):
+ async def hyperlink_to_mir(self, pipe_args: dict, series: str):
"""Maps pipeline components to MIR tags/IDs based on class names and roles.\n
:param pipe_args: Dictionary of pipeline roles to their corresponding classes
- :param mir_db: MIRDatabase instance for querying tags/IDs
:return: Dictionary mapping pipeline roles to associated MIR tags/IDs"""
mir_tag: None | list[str] = None
@@ -108,23 +67,22 @@ async def hyperlink_to_mir(self, pipe_args: dict, series: str, mir_db: MIRDataba
mir_tag = None
class_name = union_class.__name__
if not any(segment for segment in self.skip_types if class_name == segment):
- mir_tag, class_name = await self.tag_class(pipe_class=union_class, pipe_role=pipe_role, series=series, mir_db=mir_db)
+ mir_tag, class_name = await self.tag_class(pipe_class=union_class, pipe_role=pipe_role, series=series)
# mir_tag = mir_db.find_tag(field="tasks", target=class_name)
# dbuq(f"{mir_tag} {class_name}")
detected_links["pipe_names"][pipe_role].append(mir_tag if mir_tag else class_name)
else:
- mir_tag, class_name = await self.tag_class(pipe_class=pipe_class, pipe_role=pipe_role, series=series, mir_db=mir_db)
+ mir_tag, class_name = await self.tag_class(pipe_class=pipe_class, pipe_role=pipe_role, series=series)
detected_links["pipe_names"][pipe_role] = mir_tag if mir_tag else [class_name]
mir_tag = None
class_name = None
return detected_links
- async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir_db: MIRDatabase) -> tuple[str | None]:
+ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str) -> tuple[str | None]:
"""Maps a class to MIR tags/IDs based on its name and role.\n
:param pipe_class: Class to be mapped
:param pipe_role: Role of the class in the pipeline
:param series: Series identifier for the component
- :param mir_db: MIRDatabase instance for querying tags/IDs
:return: Tuple containing MIR tag and class name"""
mir_tag = None
@@ -133,47 +91,11 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str, mir
sub_field = pipe_class.__module__.split(".")[0]
scheduler_series, scheduler_comp = tag_scheduler(class_name)
mir_tag = [f"ops.scheduler.{scheduler_series}", scheduler_comp]
- if not mir_db.db.get(mir_tag[0], {}).get(mir_tag[1]):
- mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="ops.scheduler")
DBUQ(f"scheduler {mir_tag} {class_name} {sub_field} ")
elif pipe_role == "vae":
sub_field = pipe_class.__module__.split(".")[0]
mir_comp = series.rsplit(".", 1)[-1]
DBUQ(mir_comp)
- mir_tag = [mir_id for mir_id, comp_data in mir_db.db.items() if "info.vae" in mir_id and next(iter(comp_data)) == mir_comp]
- if mir_tag:
- mir_tag.append(mir_comp) # keep mir tag as single list
- elif class_name != "AutoencoderKL":
- DBUQ(pipe_class)
- mir_tag = mir_db.find_tag(field="pkg", target=class_name, sub_field=sub_field, domain="info.vae")
- DBUQ(f"vae {mir_tag} {class_name} {sub_field} ")
- else:
- mir_tag = mir_db.find_tag(field="tasks", target=class_name)
+ mir_tag = "info.vae"
return mir_tag, class_name
- async def trace_tasks(self, pkg_tree: dict[str, str | int | list[str | int]]) -> List[str]:
- """Trace tasks for a given MIR entry.\n
- :param entry: The object containing the model information.
- :return: A sorted list of tasks applicable to the model."""
-
- preformatted_task_data = None
- filtered_tasks = None
- snip_words: set[str] = {"load_tf_weights_in"}
- package_name = next(iter(pkg_tree))
- DBUQ(pkg_tree)
- class_name = pkg_tree[package_name]
- DBUQ(f"{package_name}, {class_name}")
- if class_name not in self.skip_auto:
- if isinstance(class_name, dict):
- class_name = next(iter(list(class_name)))
- if package_name == "transformers":
- preformatted_task_data = show_transformers_tasks(class_name=class_name)
- elif package_name == "diffusers":
- code_name = get_internal_name_for(class_name, package_name)
- preformatted_task_data = show_diffusers_tasks(code_name=code_name, class_name=class_name)
- preformatted_task_data.sort()
- elif package_name == "mflux":
- preformatted_task_data = self.mflux_tasks
- if preformatted_task_data:
- filtered_tasks = [task for task in preformatted_task_data for snip in snip_words if snip not in task]
- return filtered_tasks # package_name, class_name
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index d1fb779..7c33dc5 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -22,21 +22,21 @@ def find_transformers_classes(self) -> None:
:return: List of PrepareData entries representing the transformer classes."""
from mir.generate.transformers import AUTO_MAP
- for config_class, model_class in AUTO_MAP.items():
+ for config_class, model_class in AUTO_MAP.items(): #type: ignore
if isinstance(model_class, tuple):
- model_class = model_class[0]
+ model_class: Callable = model_class[0]
if not (config_data := self.extract_config_class_data(config_class)):
continue
if not (model_data := self.extract_model_class_data(model_class)):
continue
- if not (prepared_data := PrepareData(**config_data, **model_data)): # type:ignore
+ if not (prepared_data := PrepareData(**config_data, **model_data)): # type:ignore , _Lazyautomapping tuple
continue
mir_tag = MIRTag(prepared_data)
mir_nest = MIRNesting(mir_tag, prepared_data)
packages = [MIRPackage(data=prepared_data.model)]
if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
- packages.append(MIRPackage(data=prepared_data.tokenizer))
+ packages.append(MIRPackage(data=prepared_data.tokenizer)) #type: ignore , _Lazyautomapping tuple
packages.append(MIRPackage(data=mir_nest.framework_data))
for pkg in packages:
mir_nest(pkg)
diff --git a/mir/tag.py b/mir/tag.py
index c31266f..3df4200 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -4,6 +4,7 @@
from dataclasses import dataclass, field
from mir.generate.transformers.raw_data import PrepareData
+from mir.generate.diffusers.raw_data import DPrepareData
@dataclass
@@ -17,14 +18,15 @@ class MIRTag:
comp The compatibility component of the MIR tag (generated, optional).
"""
- raw_data: PrepareData
+ raw_data: PrepareData | DPrepareData
arch: str = field(init=False)
series: str = field(init=False)
+ decoder: bool = False
def __post_init__(self) -> None:
"""Initializes MIRTag instance, setting up database connection and generating package and MIR tag information."""
self.generate_arch()
- self.generate_series_and_comp(repo_title=self.raw_data.repo_path)
+ self.generate_series_and_comp(repo_path=self.raw_data.repo_path)
def generate_arch(self) -> None:
"""Generates the architecture part of the MIR tag based on prepared data.\n
@@ -32,7 +34,11 @@ def generate_arch(self) -> None:
from mir.generate.from_module import to_domain_tag
library = self.raw_data.model.__module__.split(".")[0]
- arch = to_domain_tag(library, **self.raw_data.config_params)
+ if hasattr(self.raw_data, "config_params"):
+ arch = to_domain_tag(library, **self.raw_data.config_params) # type: ignore
+ else:
+ arch = None
+ self.decoder = "decoder" in self.raw_data.model_params
if not arch:
if self.raw_data.model_params:
if arch := to_domain_tag(library, **self.raw_data.model_params):
@@ -42,11 +48,11 @@ def generate_arch(self) -> None:
raise ValueError(
f"Unrecognized model type, \
no tag matched {self.raw_data.name} \
- with {self.raw_data.config_params} or {self.raw_data.model_params}",
+ with {self.raw_data}",
)
self.arch = arch
- def generate_series_and_comp(self, repo_title: str, decoder=False) -> None:
+ def generate_series_and_comp(self, repo_path: str, decoder: bool = False) -> None:
"""Generates the MIR tag components from a repository title.\n
:param repo_title: The title of the repository from which to derive the MIR tag.
:param decoder: Boolean flag indicating if the model is a decoder.
@@ -57,17 +63,17 @@ def generate_series_and_comp(self, repo_title: str, decoder=False) -> None:
from mir import BREAKING, PARAMETERS
root = "decoder" if decoder else "*"
- repo_title = repo_title.split(":latest")[0]
- repo_title = repo_title.split(":Q")[0]
- repo_title = repo_title.split(r"/")[-1].lower()
+ repo_path = repo_path.split(":latest")[0]
+ repo_path = repo_path.split(":Q")[0]
+ repo_path = repo_path.split(r"/")[-1].lower()
pattern = r"^.*[v]?(\d{1}+\.\d).*"
- match = re.findall(pattern, repo_title)
+ match = re.findall(pattern, repo_path)
if match:
if next(iter(match)):
- repo_title = repo_title.replace(next(iter(match))[-1], "")
- parts = repo_title.replace(".", "").split("-")
+ repo_path = repo_path.replace(next(iter(match))[-1], "")
+ parts = repo_path.replace(".", "").split("-")
if len(parts) == 1:
- parts = repo_title.split("_")
+ parts = repo_path.split("_")
subtraction_prefixes = r"\d.b-|\-rl|tiny|large|mlx|onnx|gguf|medium|base|multimodal|mini|instruct|full|:latest|preview|small|pro|beta|hybrid|plus|dpo|community"
pattern_2 = re.compile(PARAMETERS)
@@ -86,3 +92,34 @@ def generate_series_and_comp(self, repo_title: str, decoder=False) -> None:
self.series = cleaned_string
if suffix != "*":
self.comp = suffix
+
+ # def generate_pipe_tag(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
+ # """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
+ # :param repo_path (str): Repository path.
+ # :param model_class_obj (str): The model class function
+ # :raises TypeError: If 'repo_path' or 'class_name' are not set.
+ # :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
+ # """
+ # import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
+
+ # if hasattr(diffusers, class_name):
+ # model_class_obj = getattr(diffusers, class_name)
+ # sub_segments = show_init_fields_for(model_class_obj, "diffusers")
+
+ # else:
+ # mir_prefix = to_domain_tag(**sub_segments)
+ # if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
+ # NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
+ # else:
+ # mir_prefix = "info." + mir_prefix
+
+ # mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
+ # mir_series = mir_prefix + "." + mir_series
+ # repo_path = migrations(repo_path)
+ # # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
+ # prefixed_data = {
+ # "repo": repo_path,
+ # "pkg": {0: {"diffusers": class_name}},
+ # # "mode": modalities.get("mode"),
+ # }
+ # return mir_series, {mir_comp: prefixed_data}
diff --git a/tests/subclasses_test.py b/tests/subclasses_test.py
new file mode 100644
index 0000000..e69de29
From a49a4054725f2abf4e58a53480560bfd8c62a687 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Sun, 18 Jan 2026 21:35:43 -0500
Subject: [PATCH 12/16] ~put together diffusers initial parts
---
MIR.egg-info/SOURCES.txt | 41 +--
mir/__init__.py | 4 +-
.../automata.py => _deprecated/_automata.py} | 0
mir/{generate => _deprecated}/_extras.py | 93 ++++--
.../guiders.py => _deprecated/_guiders.py} | 27 ++
mir/_deprecated/_index.py | 270 ++++++++++++++++++
.../_schedulers.py} | 0
mir/data/__init__.py | 1 +
mir/data/component_names.json | 20 ++
mir/data/nn_filter.json | 15 +-
mir/framework.py | 119 --------
mir/generate/{tasks.py => _tasks.py} | 27 +-
mir/generate/diffusers/attention.py | 26 --
mir/generate/diffusers/doc_parse.py | 10 +-
mir/generate/diffusers/harvest.py | 110 +++----
mir/generate/diffusers/index.py | 214 --------------
mir/generate/diffusers/raw_data.py | 34 ++-
mir/generate/from_module.py | 77 +----
mir/generate/indexers.py | 46 ---
mir/generate/mlx/{index.py => harvest.py} | 0
mir/generate/transformers/__init__.py | 2 -
mir/generate/transformers/harvest.py | 16 +-
mir/maid.py | 3 +-
mir/package.py | 109 +++++++
mir/tag.py | 105 ++++---
pyproject.toml | 5 +
tests/test_mir_generate_diffusers.py | 6 +
...e.py => test_mir_generate_transformers.py} | 14 +-
28 files changed, 742 insertions(+), 652 deletions(-)
rename mir/{generate/automata.py => _deprecated/_automata.py} (100%)
rename mir/{generate => _deprecated}/_extras.py (65%)
rename mir/{generate/diffusers/guiders.py => _deprecated/_guiders.py} (68%)
create mode 100644 mir/_deprecated/_index.py
rename mir/{generate/diffusers/schedulers.py => _deprecated/_schedulers.py} (100%)
create mode 100644 mir/data/component_names.json
delete mode 100644 mir/framework.py
rename mir/generate/{tasks.py => _tasks.py} (86%)
delete mode 100644 mir/generate/diffusers/attention.py
delete mode 100644 mir/generate/diffusers/index.py
delete mode 100644 mir/generate/indexers.py
rename mir/generate/mlx/{index.py => harvest.py} (100%)
create mode 100644 tests/test_mir_generate_diffusers.py
rename tests/{test_mir_generate.py => test_mir_generate_transformers.py} (69%)
diff --git a/MIR.egg-info/SOURCES.txt b/MIR.egg-info/SOURCES.txt
index e7d1cc2..0867f5a 100644
--- a/MIR.egg-info/SOURCES.txt
+++ b/MIR.egg-info/SOURCES.txt
@@ -13,9 +13,12 @@ MIR.egg-info/entry_points.txt
MIR.egg-info/requires.txt
MIR.egg-info/top_level.txt
mir/__init__.py
+mir/__main__.py
+mir/framework.py
mir/json_io.py
mir/maid.py
mir/mir.json
+mir/package.py
mir/tag.py
mir/data/__init__.py
mir/data/diffusers_adds.json
@@ -37,30 +40,34 @@ mir/generate/diffusers/__init__.py
mir/generate/diffusers/attention.py
mir/generate/diffusers/doc_parse.py
mir/generate/diffusers/guiders.py
+mir/generate/diffusers/harvest.py
mir/generate/diffusers/index.py
+mir/generate/diffusers/raw_data.py
mir/generate/diffusers/schedulers.py
mir/generate/mlx/__init__.py
mir/generate/mlx/index.py
mir/generate/torch/__init__.py
mir/generate/torch/dtypes.py
mir/generate/transformers/__init__.py
-mir/generate/transformers/index.py
+mir/generate/transformers/harvest.py
mir/generate/transformers/raw_data.py
-mir/generate/transformers/tokenizers.py
mir/spec/__init__.py
mir/spec/regex.json
-tests/test_class_parent.py
-tests/test_deconstructors_root.py
-tests/test_doc_parser.py
-tests/test_find_docstring_run.py
-tests/test_gather_diffusers_metadata.py
-tests/test_json_io.py
-tests/test_mir_db_create_restore.py
-tests/test_mir_merge.py
-tests/test_mir_search.py
-tests/test_mir_tagging.py
-tests/test_regex_constants.py
-tests/test_resolve_code_names.py
-tests/test_seek_class.py
-tests/test_task.py
-tests/test_taskanalyzer.py
\ No newline at end of file
+tests/subclasses_test.py
+tests/test_mir_generate_diffusers.py
+tests/test_mir_generate_transformers.py
+tests/old/test_class_parent.py
+tests/old/test_deconstructors_root.py
+tests/old/test_doc_parser.py
+tests/old/test_find_docstring_run.py
+tests/old/test_gather_diffusers_metadata.py
+tests/old/test_json_io.py
+tests/old/test_mir_db_create_restore.py
+tests/old/test_mir_merge.py
+tests/old/test_mir_search.py
+tests/old/test_mir_tagging.py
+tests/old/test_regex_constants.py
+tests/old/test_resolve_code_names.py
+tests/old/test_seek_class.py
+tests/old/test_task.py
+tests/old/test_taskanalyzer.py
\ No newline at end of file
diff --git a/mir/__init__.py b/mir/__init__.py
index ba063fb..3405a0f 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -6,7 +6,6 @@
from logging import DEBUG, INFO, Logger
from mir.json_io import read_json_file
-from mir.generate.transformers.harvest import HarvestClasses
NFO = Logger(INFO).info
DBUQ = Logger(DEBUG).debug
@@ -21,5 +20,8 @@
SUFFIX = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["suffix"]
IGNORE = read_json_file(os.path.join(ROOT_PATH, "spec", "regex.json"))["ignore"]
+# from mir.generate.transformers.harvest import HarvestClasses
+# Mir = HarvestClasses().db.db
+from mir.generate.diffusers.harvest import HarvestClasses
Mir = HarvestClasses().db.db
diff --git a/mir/generate/automata.py b/mir/_deprecated/_automata.py
similarity index 100%
rename from mir/generate/automata.py
rename to mir/_deprecated/_automata.py
diff --git a/mir/generate/_extras.py b/mir/_deprecated/_extras.py
similarity index 65%
rename from mir/generate/_extras.py
rename to mir/_deprecated/_extras.py
index c1b0366..39af779 100644
--- a/mir/generate/_extras.py
+++ b/mir/_deprecated/_extras.py
@@ -36,27 +36,6 @@ def _class_parent(code_name: str, pkg_name: str) -> Optional[List[str]]:
return import_path
-def _extract_inherited_classes(model_class: Union[Callable, str], pkg_name: Optional[str] = None) -> Optional[Dict[str, List[str]]]:
- """Strips tags from module's base classes and extracts inherited class members.\n
- If `module` is a string, it requires the `library` argument to convert it into a callable.\n
- :param module: A module or string representing a module.
- :param library: Library name required if `module` is a string. Defaults to None.
- :returns: Mapping indices to class path segments, or None if invalid input."""
-
- if isinstance(model_class, str):
- if not pkg_name:
- NFO("Provide a library type argument to process strings")
- return None
- model_class = import_object_named(model_class, pkg_name)
- signature = model_class.__bases__
- class_names = []
- for index, class_annotation in enumerate(signature):
- tag_stripped = str(class_annotation)[8:-2]
- module_segments = tag_stripped.split(".")
- class_names.append(module_segments)
- return class_names
-
-
def _trace_classes(pipe_class: str, pkg_name: str) -> Dict[str, List[str]]:
"""Retrieve all compatible pipe forms\n
NOTE: Mainly for Diffusers
@@ -189,3 +168,75 @@ def tag_transformers_model(repo_path: str, class_name: str, addendum: dict | Non
else:
mir_prefix = f"info.{mir_prefix}"
return mir_prefix, base_series, {base_comp: addendum}
+
+
+# def extract_model_data(self,pipe_name, file_name: str) -> dict | None:
+# migrated_pipes = MIGRATIONS["migrated_pipes"]
+# pkg_path = f"diffusers.pipelines.{pipe_name}.{file_name}"
+# pipe_file: Callable = import_object_named(file_name, pkg_path) or import_module(pkg_path)
+# if pipe_file and (doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None)): #where pipe class and repo are
+# docstrings= DocStringEntry(package_name=pipe_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
+# DocStringParser(doc_string=docstrings.doc_string)
+# self.parsed_docs.pipe_repo = migrated_pipes.get(self.parsed_docs.pipe_class, self.parsed_docs.pipe_repo)
+# model = import_object_named(parsed_data.pipe_class, docstrings.pipe_module.__name__)
+# model_data = show_init_fields_for(model,"diffusers")
+# return {"model_params": model_data}
+
+
+# for pipe_name in IMPORT_STRUCTURE.keys():
+# if pipe_name not in exclusion_list and (import_name := getattr(diffusers_pipelines, str(pipe_name))):
+# file_specific = uncommon_naming.get(pipe_name, pipe_name)
+# file_names:list[str] = [getattr(import_name, "_import_structure", {})] or [f"pipeline_{file_specific}"]
+# for file_name in file_names:
+# if not file_name in exclusion_list or not (model_data := self.extract_model_data(pipe_name, file_name)):
+# continue
+# if not (prepared_data := PrepareData( **model_data)):
+# continue
+# else:
+# continue
+
+
+# def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
+# """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
+# ### NOTE: in most cases `__module__` makes this redundant
+# :param code_name: The internal name for the model in the third-party API.
+# :param pkg_name: The API Package
+# :return: A list corresponding to the path of the model, or None if not found
+# :raises KeyError: for invalid pkg_name
+# """
+
+# pkg_paths = {
+# "diffusers": "pipelines",
+# "transformers": "models",
+# }
+# folder_name = code_name.replace("-", "_")
+# pkg_name = pkg_name.lower()
+# folder_path = pkg_paths[pkg_name]
+# package_obj = import_module(pkg_name)
+# folder_path_named = [folder_path, folder_name]
+# pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
+# # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
+# if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
+# import_path = [pkg_name]
+# import_path.extend(folder_path_named)
+# return import_path
+
+
+# def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
+# """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
+# :param class_name: To return only one class, defaults to None
+# :param pkg_name: optional field for library, defaults to "transformers"
+# :param path_format: Retrieve just the code name, or the full module path and code name within the package
+# :return: A list of all code names, or the one corresponding to the provided class"""
+# from mir.generate.diffusers import IMPORT_STRUCTURE
+# from mir.generate.transformers import MODEL_MAPPING_NAMES
+
+# package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
+# pkg_name = pkg_name.lower()
+# MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
+# if module_name:
+# if isinstance(module_name, Type):
+# module_name = module_name.__name__
+# code_name = next(iter(key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
+# return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
+# return list(MAPPING_NAMES)
diff --git a/mir/generate/diffusers/guiders.py b/mir/_deprecated/_guiders.py
similarity index 68%
rename from mir/generate/diffusers/guiders.py
rename to mir/_deprecated/_guiders.py
index 39789af..b791829 100644
--- a/mir/generate/diffusers/guiders.py
+++ b/mir/_deprecated/_guiders.py
@@ -59,3 +59,30 @@
# },
# ),
+
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
+# from diffusers.models.attention_processor import AttentionProcessor
+
+# mir_data
+# for series, comp_name in mir_data.items():
+# id_segment = series.split(".")
+# for compatibility in comp_name:
+# dbug(id_segment)
+# try:
+# mir_db.add(
+# mir_entry(
+# domain=id_segment[0],
+# arch=id_segment[1],
+# series=id_segment[2],
+# comp=compatibility,
+# **mir_data[series][compatibility],
+# ),
+# )
+# except IndexError as error_log:
+# nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
+# dbug(error_log)
+
diff --git a/mir/_deprecated/_index.py b/mir/_deprecated/_index.py
new file mode 100644
index 0000000..813bcdd
--- /dev/null
+++ b/mir/_deprecated/_index.py
@@ -0,0 +1,270 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+# import os
+# from importlib import import_module
+# from typing import Any, Generator
+
+# from mir import DBUQ, NFO
+# from mir.data import EXCLUSIONS
+# from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS
+# from mir.generate.from_module import import_object_named, show_init_fields_for, to_domain_tag
+# from mir.generate.indexers import migrations
+
+
+# def retrieve_diffusers_docstrings(
+# package_name: str,
+# file_names: list[str],
+# ) -> Generator[DocStringEntry]:
+# """Yield (pkg, file, EXAMPLE_DOC_STRING) from a folder or a single file.\n
+# :param pkg_name: Package under ``diffusers.pipelines``.\n
+# :param file_names: A list of related file names.\n
+# :param use_folder: True → treat ``source`` as a folder with ``_import_structure``.\n
+# :return: DocString Entry class.\n
+# """
+
+# module_location: str | None = import_module("diffusers.pipelines").__file__
+# module_path = os.path.dirname(module_location)
+
+# for file_name in file_names:
+# assert isinstance(file_name, str), f"Expected path to be string, got {file_name} type {type(file_name)}"
+# if file_name == "pipeline_stable_diffusion_xl_inpaint":
+# continue
+
+# pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
+# DBUQ(pkg_path)
+
+# if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
+# pipe_file = import_object_named(file_name, pkg_path) or import_module(pkg_path) or NFO(f"Failed to import {pkg_path}")
+# if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
+# yield DocStringEntry(package_name=package_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
+# else:
+# NFO(f"Doc string attribute missing for {package_name}/{file_name}")
+# else:
+# NFO(f"Path not found for {package_name}/{file_name}")
+
+# return
+
+
+# def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
+# """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
+# :param repo_path (str): Repository path.
+# :param model_class_obj (str): The model class function
+# :raises TypeError: If 'repo_path' or 'class_name' are not set.
+# :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
+# """
+# import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
+
+# control_net = ["Control", "Controlnet"] #
+# mir_prefix = "info"
+# if hasattr(diffusers, class_name):
+# model_class_obj = getattr(diffusers, class_name)
+# sub_segments = show_init_fields_for(model_class_obj, "diffusers")
+# decoder = "decoder" in sub_segments
+# if repo_path in ["kandinsky-community/kandinsky-3"]:
+# mir_prefix = "info.unet"
+# if repo_path in ["openai/shap-e"]:
+# mir_prefix = "info.unet"
+# class_name = "ShapEPipeline"
+# elif class_name == "MotionAdapter":
+# mir_prefix = "info.lora"
+# elif class_name == "WanPipeline":
+# mir_prefix = "info.dit"
+# elif class_name == "CogVideoXVideoToVideoPipeline":
+# class_name = "CogVideoXPipeline"
+# elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
+# mir_prefix = "info.controlnet"
+# else:
+# mir_prefix = to_domain_tag(**sub_segments)
+# if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
+# NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
+# else:
+# mir_prefix = "info." + mir_prefix
+# if class_name == "StableDiffusion3InpaintPipeline" or repo_path in ["stabilityai/stable-diffusion-3-medium-diffusers"]:
+# class_name = "StableDiffusion3Pipeline"
+# repo_path = "stabilityai/stable-diffusion-3.5-medium"
+# if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
+# class_name = "HunyuanVideoPipeline"
+# mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
+# mir_series = mir_prefix + "." + mir_series
+# repo_path = migrations(repo_path)
+# # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
+# prefixed_data = {
+# "repo": repo_path,
+# "pkg": {0: {"diffusers": class_name}},
+# # "mode": modalities.get("mode"),
+# }
+# return mir_series, {mir_comp: prefixed_data}
+
+
+# def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
+# """Convert model repo pipes to MIR tags, classifying by feature\n
+# :param name: Repo path
+# :param class_name: The HF Diffusers class for the model
+# :return: A segmented MIR tag useful for appending index entries"""
+# mir_series, mir_data = create_pipe_entry(repo_path=repo_path, class_name=class_name)
+# mir_prefix, mir_series = mir_series.rsplit(".", 1)
+# mir_comp = list(mir_data)[0]
+# return mir_prefix, mir_series, {mir_comp: addendum}
+
+
+# def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
+# """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
+# :return: Docstrings for common diffusers models"""
+# import diffusers.pipelines as diffusers_pipelines
+
+# docstring_patterns = EXCLUSIONS
+# exclusion_list = docstring_patterns["exclusion_list"]
+# uncommon_naming = docstring_patterns["uncommon_naming"]
+# for pipe_name in IMPORT_STRUCTURE.keys():
+# if pipe_name not in exclusion_list:
+# file_specific = uncommon_naming.get(pipe_name, pipe_name)
+# if import_name := getattr(diffusers_pipelines, str(pipe_name)):
+# file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
+# yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
+# else:
+# continue
+
+
+# def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
+# """Generate diffusion model data for MIR index\n
+# :return: Dictionary ready to be applied to MIR data fields
+# """
+# special_repos = {
+# "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-dev",
+# # "stabilityai/stable-diffusion-3-medium-diffusers": "stabilityai/stable-diffusion-3.5-medium",
+# }
+# special_classes = {
+# # "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium", # NOT sd3
+# "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
+# "ChromaPipeline": "lodestones/Chroma",
+# }
+# for class_name, swap_repo in special_classes.items():
+# if parsed_data.pipe_class == class_name:
+# parsed_data.pipe_repo = swap_repo
+# extracted_docstrings = find_diffusers_docstrings()
+# model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
+# pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
+
+# for extracted in model_info:
+# parsed_data: DocParseData = parse_docs(extracted.doc_string)
+# if parsed_data is None:
+# print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
+# continue
+# for class_name, swap_repo in special_classes.items():
+# if parsed_data.pipe_class == class_name:
+# parsed_data.pipe_repo = swap_repo
+# break
+# model_class_obj = import_object_named(parsed_data.pipe_class, extracted.pipe_module.__name__)
+# if not model_class_obj:
+# continue
+# try:
+# series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
+# except TypeError:
+# pass # Attempt 1
+# if pipe_data.get(series):
+# if "img2img" in parsed_data.pipe_class.lower():
+# continue
+# pipe_data.setdefault(series, {}).update(comp_data)
+# special_conditions = special_repos | special_classes
+# if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
+# test = special_conditions.get(parsed_data.pipe_repo)
+# if test:
+# staged_repo = test
+# parsed_data.staged_class = parsed_data.pipe_class
+# try:
+# series, comp_data = create_pipe_entry(
+# staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
+# parsed_data.staged_class #
+# if parsed_data.staged_class
+# else parsed_data.pipe_class,
+# )
+# except TypeError as error_log:
+# NFO(series, comp_data)
+# NFO(error_log)
+# continue # Attempt 2,
+# pipe_data.setdefault(series, {}).update(comp_data)
+# return dict(pipe_data)
+
+
+# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
+# from nnll.download.hub_cache import download_hub_file
+
+# model_file = download_hub_file(
+# repo_id=f"{repo_id}/tree/main/{arch}",
+# source="huggingface",
+# file_name="diffusion_pytorch_model.safetensors.index.json",
+# local_dir=".tmp",
+# )
+
+
+# @MODE_DATA.decorator
+# def add_mode_types(mir_tag: list[str], data: dict | None = None) -> dict[str, list[str] | str]:
+# """_summary_\n
+# :param mir_tag: _description_
+# :param data: _description_, defaults to None
+# :return: _description_"""
+# fused_tag = ".".join(mir_tag)
+
+# mir_details = {
+# "mode": data.get(fused_tag, {}).get("pipeline_tag"),
+# "pkg_type": data.get(fused_tag, {}).get("library_type"),
+# "tags": data.get(fused_tag, {}).get("tags"),
+# }
+# return mir_details
+
+
+# def generate_pipe_tag(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
+# """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
+# :param repo_path (str): Repository path.
+# :param model_class_obj (str): The model class function
+# :raises TypeError: If 'repo_path' or 'class_name' are not set.
+# :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
+# """
+# import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
+
+# if hasattr(diffusers, class_name):
+# model_class_obj = getattr(diffusers, class_name)
+# sub_segments = show_init_fields_for(model_class_obj, "diffusers")
+
+# else:
+# mir_prefix = to_domain_tag(**sub_segments)
+# if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
+# NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
+# else:
+# mir_prefix = "info." + mir_prefix
+
+# mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
+# mir_series = mir_prefix + "." + mir_series
+# repo_path = migrations(repo_path)
+# # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
+# prefixed_data = {
+# "repo": repo_path,
+# "pkg": {0: {"diffusers": class_name}},
+# # "mode": modalities.get("mode"),
+# }
+# return mir_series, {mir_comp: prefixed_data}
+
+
+# def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
+# """Generate MIR HF Hub model database
+# :param new_data: Data for the MIR database
+# :param mir_database: MIRDatabase instance
+# """
+# for series, comp_name in new_data.items():
+# id_segment = series.split(".")
+# for compatibility in comp_name:
+# # dbug(id_segment)
+# try:
+# mir_db.add(
+# mir_entry(
+# domain=id_segment[0],
+# arch=id_segment[1],
+# series=id_segment[2],
+# comp=compatibility,
+# **new_data[series][compatibility],
+# ),
+# )
+# except IndexError: # as error_log:
+# NFO(f"Failed to create series: {series} compatibility: {comp_name} ")
+# # dbug(error_log)
diff --git a/mir/generate/diffusers/schedulers.py b/mir/_deprecated/_schedulers.py
similarity index 100%
rename from mir/generate/diffusers/schedulers.py
rename to mir/_deprecated/_schedulers.py
diff --git a/mir/data/__init__.py b/mir/data/__init__.py
index a8f596e..2e0dc48 100644
--- a/mir/data/__init__.py
+++ b/mir/data/__init__.py
@@ -17,3 +17,4 @@
PIPE_MARKERS = read_json_file(os.path.join(ROOT_PATH, "data", "pipe_markers.json"))
TAG_SCRAPE = read_json_file(os.path.join(ROOT_PATH, "data", "tag_scrape.json"))
TRANSFORMERS_ADDS = read_json_file(os.path.join(ROOT_PATH, "data", "transformers_adds.json"))
+COMPONENT_NAMES = read_json_file(os.path.join(ROOT_PATH, "data", "component_names.json"))
diff --git a/mir/data/component_names.json b/mir/data/component_names.json
new file mode 100644
index 0000000..b371ec3
--- /dev/null
+++ b/mir/data/component_names.json
@@ -0,0 +1,20 @@
+{
+ "components": [
+ "scheduler",
+ "vae",
+ "unet",
+ "transformer",
+ "transformer_2",
+ "transformer_3",
+ "text_model",
+ "text_model_2",
+ "text_model_3",
+ "text_model_4",
+ "tokenizer",
+ "tokenizer_1",
+ "tokenizer_2",
+ "tokenizer_3",
+ "tokenizer_4",
+ "feature_extractor"
+ ]
+}
\ No newline at end of file
diff --git a/mir/data/nn_filter.json b/mir/data/nn_filter.json
index 4638ce1..25399b5 100644
--- a/mir/data/nn_filter.json
+++ b/mir/data/nn_filter.json
@@ -140,19 +140,30 @@
]
},
"diffusers": {
+ "vae": [
+ "autoencoder",
+ "autoencoders"
+ ],
+ "scheduler": [
+ "scheduler",
+ "schedulers"
+ ],
"lora": [
"motion_adapter"
],
"controlnet": [
- "controlnet"
+ "controlnet",
+ "controlnets"
],
"unet": [
"unet",
+ "unets",
"prior",
"decoder"
],
"dit": [
- "transformer"
+ "transformer",
+ "transformers"
]
}
}
diff --git a/mir/framework.py b/mir/framework.py
deleted file mode 100644
index fd5d2d4..0000000
--- a/mir/framework.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from typing import Any, Callable
-from dataclasses import dataclass, field
-from mir.generate.diffusers.raw_data import DPrepareData
-from mir.generate.transformers.raw_data import PrepareData
-from mir.tag import MIRTag
-
-
-@dataclass
-class MIRPackage:
- data: Callable | str | dict[str, str]
- package: dict[str, str] = field(init=False, default_factory=dict[str, str])
-
- def __init__(self, data: Callable | str | dict[str, str] | dict[str, Any]):
- self.package = {}
- self.data = data
- if not isinstance(self.data, dict):
- self.generate_package()
- else:
- self.add_framework(self.data)
-
- def generate_package(self) -> None:
- """Generates package information for the MIR tag based on class.
- :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
- self.domain = "ops"
- model = f"{self.data.__module__}.{self.data.__name__}"
- self.package: dict[str, str] = {"model": model}
-
- def add_framework(self, framework_data) -> None:
- self.domain = "info"
- self.package = framework_data
-
-
-class MIRNesting:
- """Build tag components from the extracted data\n
- :param mir_tag: An instance of MIR tag with the necessary information
- :param prepared_data: Instance of PrepareData to attribute the final information
- :returns: The final, assembled MIR tag"""
-
- loops: list[str]
- framework_data: dict[str, str | dict[str, Any]] = {}
- repo: str | None = field(default_factory=str | None)
- framework: dict[str, str] = field(init=False)
- tokenizer: str | None = field(default_factory=str)
-
- def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData | DPrepareData) -> None:
- """\nInitialize the framework with MIR tag and prepared data.\n
- :param mir_tag : The MIR tag instance.
- :param prepared_data : The prepared data for processing."""
- self.mir_tag = mir_tag
-
- self.prepared_data = prepared_data
- self.loops = []
- self.framework_data = {}
-
- def __call__(self, mir_package: MIRPackage) -> None:
- """Dispatches a MIRPackage to the appropriate handler based on its domain.
- :param mir_package: An instance of MIRPackage with the requisite data to tag"""
-
- if mir_package.domain == "ops" and hasattr(self.prepared_data, "tokenizer") and self.prepared_data.tokenizer and self.loops == ["model"]: # type: ignore
- self._process("tokenizer", mir_package)
- elif mir_package.domain == "ops":
- self._process("model", mir_package)
- elif mir_package.domain == "info":
- self._process("framework", mir_package)
-
- def _process(self, name: str, mir_package: MIRPackage) -> None:
- """Common routine for handling a package: store tag data, nest the package,
- and record the name of the newly-created attribute.\n
- :param name: Identification string to store data underneath
- :param mir_package: An instance of MIRPackage with the requisite data"""
-
- is_framework = name == "framework"
- is_model = name == "model"
-
- if is_framework:
- package_data = {self.prepared_data.library: mir_package.package}
- tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
- if comp := getattr(self.mir_tag, "comp", None):
- tag_data += comp
- self.framework_data.setdefault("repo", self.prepared_data.repo_path)
- elif is_model:
- package_data = {self.prepared_data.library: mir_package.package}
- if hasattr(self.prepared_data, "tasks") and self.prepared_data.tasks:
- package_data[self.prepared_data.library].setdefault("tasks", self.prepared_data.tasks)
- tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
- if comp := getattr(self.mir_tag, "comp", None):
- tag_data += comp
- self.framework_data.setdefault(name, tag_data)
- else: # tokenizer case
- package_data = {self.prepared_data.library: mir_package.package}
- tag_data = f"{mir_package.domain}.encoder.tokenizer.{self.mir_tag.series}"
- self.framework_data.setdefault(name, tag_data)
-
- self.nest_data(name=name, tag_data=tag_data, package_data=package_data)
- self.loops.append(name)
-
- def nest_data(self, name: str, tag_data: str, package_data: dict) -> None:
- """Nest data into a hierarchical attribute structure.\n
- :param name: Attribute name to store the nested data
- :param tag_data: Dotted path string for nesting
- :param package_data: Data to be stored in the nested structure"""
-
- from chanfig import NestedDict
-
- tag_parts = tuple(x for x in tag_data.split("."))
-
- if len(tag_parts) == 4:
- domain, arch, series, comp = tag_parts
- nest = NestedDict({f"{domain}.{arch}.{series}": {comp: ""}})
- nest[domain][arch][series][comp] = package_data
- else:
- domain, arch, series = tag_parts
- nest = NestedDict({f"{domain}.{arch}": {series: ""}})
- nest[domain][arch][series] = package_data
-
- setattr(self, name, nest)
diff --git a/mir/generate/tasks.py b/mir/generate/_tasks.py
similarity index 86%
rename from mir/generate/tasks.py
rename to mir/generate/_tasks.py
index b037bdf..5c746ef 100644
--- a/mir/generate/tasks.py
+++ b/mir/generate/_tasks.py
@@ -2,9 +2,8 @@
#
-from typing import Any, Callable, List, get_type_hints
-from mir.maid import MIRDatabase
-from mir.generate.diffusers.index import show_diffusers_tasks
+from typing import Any, Callable, List
+from mir.generate.diffusers.raw_data import DPrepareData
from mir.generate.diffusers.schedulers import tag_scheduler
from mir import DBUQ
from mir.tag import MIRTag
@@ -14,7 +13,13 @@
class TaskAnalyzer:
- def __init__(self) -> None:
+ prepared_data: DPrepareData
+ mir_tag: MIRTag
+ tasks: dict[str, str] | None = None
+
+ def __init__(self, prepared_data: DPrepareData, mir_tag: MIRTag) -> None:
+ self.prepared_data = prepared_data
+ self.mir_tag = mir_tag
self.skip_series = [
"info.lora",
"info.vae",
@@ -28,7 +33,7 @@ def __init__(self) -> None:
self.skip_types = ["int", "bool", "float", "Optional", "NoneType", "List", "UNet2DConditionModel"]
self.mflux_tasks = ["Image", "Redux", "Kontext", "Depth", "Fill", "ConceptAttention", "ControlNet", "CavTon", "IC-Edit"]
- async def detect_pipes(self, mir_tag: MIRTag, model: Callable, type_params: dict) -> dict:
+ async def __post_init__(self) -> None:
"""Detects and traces Pipes MIR data\n
:param mir_db:: An instance of MIRDatabase containing the database of information.
:type mir_db: MIRDatabase
@@ -38,14 +43,13 @@ async def detect_pipes(self, mir_tag: MIRTag, model: Callable, type_params: dict
:rtype: dict"""
data_tuple = []
- tasks = show_diffusers_tasks(code_name= class_name=model.__name__)
- detected_pipe = await self.hyperlink_to_mir(type_params, mir_tag.series)
- if hasattr(mir_tag, "comp") and mir_tag.comp:
- data_tuple.append((*mir_tag.series, {mir_tag.comp: detected_pipe}))
+ detected_pipe = await self.hyperlink_to_mir(self.prepared_data.model_params, self.mir_tag.series)
+ if hasattr(self.mir_tag, "comp") and self.mir_tag.comp:
+ self.tasks(*self.mir_tag.series, {self.mir_tag.comp: detected_pipe})
else:
- data_tuple.append(({mir_tag.series: detected_pipe}))
+ self.tasks({self.mir_tag.series: self.prepared_data.model_path})
- return data_tuple
+ self.tasks = data_tuple
async def hyperlink_to_mir(self, pipe_args: dict, series: str):
"""Maps pipeline components to MIR tags/IDs based on class names and roles.\n
@@ -98,4 +102,3 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str) ->
DBUQ(mir_comp)
mir_tag = "info.vae"
return mir_tag, class_name
-
diff --git a/mir/generate/diffusers/attention.py b/mir/generate/diffusers/attention.py
deleted file mode 100644
index 00df941..0000000
--- a/mir/generate/diffusers/attention.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
-# from diffusers.models.attention_processor import AttentionProcessor
-
-# mir_data
-# for series, comp_name in mir_data.items():
-# id_segment = series.split(".")
-# for compatibility in comp_name:
-# dbug(id_segment)
-# try:
-# mir_db.add(
-# mir_entry(
-# domain=id_segment[0],
-# arch=id_segment[1],
-# series=id_segment[2],
-# comp=compatibility,
-# **mir_data[series][compatibility],
-# ),
-# )
-# except IndexError as error_log:
-# nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
-# dbug(error_log)
-
diff --git a/mir/generate/diffusers/doc_parse.py b/mir/generate/diffusers/doc_parse.py
index 0e70ba3..2d7aa7b 100644
--- a/mir/generate/diffusers/doc_parse.py
+++ b/mir/generate/diffusers/doc_parse.py
@@ -42,16 +42,15 @@ def validate_repo_path(repo_path: Optional[str], segment: str) -> Optional[str]:
class DocStringParser(BaseModel):
doc_string: str
model: Callable
+ model_path: str
+ pipe_repo: str | None = None
+ staged_repo: str | None = None
@field_validator("doc_string")
def normalize_doc(cls, docs: str) -> str:
return DocStringValidator.normalize_doc_string(docs)
- def __init__(self, doc_string, model) -> None:
- self.doc_string = doc_string
- self.model = model
-
- def __post_init__(self) -> dict[str, str] | None:
+ def parse(self) -> dict[str, str] | None:
candidate, prior_candidate, staged = self.doc_match(PIPE_MARKERS["pipe_variables"])
if candidate:
pipe_repo = self._extract_class_and_repo(
@@ -79,6 +78,7 @@ def __post_init__(self) -> dict[str, str] | None:
def doc_match(self, prefix_set: List[str] | None = None):
if prefix_set is None:
prefix_set = PIPE_MARKERS["pipe_variables"]
+ assert prefix_set is not None
candidate = None
staged = None
prior_candidate = ""
diff --git a/mir/generate/diffusers/harvest.py b/mir/generate/diffusers/harvest.py
index 6ed0d9b..db40d91 100644
--- a/mir/generate/diffusers/harvest.py
+++ b/mir/generate/diffusers/harvest.py
@@ -2,42 +2,82 @@
#
from importlib import import_module
-from pkgutil import walk_packages
from inspect import getmro
+from typing import Any, Callable, get_type_hints
-from mir.framework import MIRNesting
from mir.generate.diffusers.raw_data import DPrepareData
+from mir.package import MIRNesting, MIRPackage
from mir.tag import MIRTag
class HarvestClasses:
def __init__(self) -> None:
- self.parsed_docs = []
- pass
+ """Initializes the HarvestClasses instance with an empty list to store raw class data."""
+ from mir.maid import MIRDatabase
+
+ self.db = MIRDatabase()
+ self.raw_data = []
+ self.find_diffusers_docstrings()
def find_diffusers_docstrings(self) -> None:
"""Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
:return: Docstrings for common diffusers models"""
- self.extract_model_data()
-
- def extract_model_data(self):
- from mir.generate.diffusers import EXCLUSIONS
- from mir.generate.tasks import TaskAnalyzer
+ # from mir.generate.tasks import TaskAnalyzer
- subclasses = self.subclasses_of("diffusers", "DiffusionPipeline")
- for path, class_obj in subclasses.items():
- if path.rsplit(".", 1)[-1] in EXCLUSIONS["exclusion_list"].get("model_path", "."):
+ subclasses = self.extract_subclass_data("diffusers", "DiffusionPipeline")
+ for module_path, model in subclasses.items():
+ if not (base_data := self.extract_base_data(module_path)):
+ continue
+ if not (model_data := self.extract_model_class_data(model)):
continue
- base_path = path.rsplit(".", 1)[0]
- model_path = import_module(base_path)
- if doc_string := getattr(model_path, "EXAMPLE_DOC_STRING", None):
- prepared_data = DPrepareData(doc_string=doc_string, model=class_obj, model_path=base_path)
- mir_tag = MIRTag(prepared_data)
- task_analysis = TaskAnalyzer()
- mir_nest = MIRNesting(mir_tag, prepared_data)
-
- def subclasses_of(self, package_name: str, base_class_name: str):
+ if not (prepared_data := DPrepareData(**base_data, **model_data)):
+ continue
+ mir_tag = MIRTag(prepared_data)
+ # task_analysis = TaskAnalyzer(prepared_data=prepared_data, mir_tag=mir_tag)
+ mir_nest = MIRNesting(mir_tag, prepared_data)
+ packages = {"model": MIRPackage(data=prepared_data.model)}
+ for component_name, component_model in prepared_data.model_params.items():
+ if hasattr(prepared_data, component_name):
+ packages.setdefault(component_name, MIRPackage(data=component_model))
+ packages.setdefault("framework", MIRPackage(data=mir_nest.framework_data))
+ # print(packages)
+ mir_nest(packages)
+
+ self.db.add_data(mir_nest, *mir_nest.loops)
+
+ def extract_base_data(self, module_path: str) -> dict[str, str] | None:
+ from mir.data import EXCLUSIONS
+
+ if module_path.rsplit(".", 1)[-1] in EXCLUSIONS["exclusion_list"]:
+ return None
+ base_path = module_path.rsplit(".", 1)[0]
+ model_path = import_module(base_path)
+ if doc_string := getattr(model_path, "EXAMPLE_DOC_STRING", None):
+ return {
+ "doc_string": doc_string,
+ "model_path": base_path,
+ }
+ return None
+
+ def extract_model_class_data(self, model: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
+ model_name: str = model.__name__
+ library: str = model.__module__.split(".", 1)[0]
+ model_params: dict[str, Any] = get_type_hints(model.__init__)
+ for module in model_params.values():
+ module_name = module.__module__
+ library_path = f"{library}.models."
+ if library_path in module_name:
+ module_name = module_name.replace(library_path, "").split(".")[0]
+ return {
+ "model": model,
+ "model_name": model_name,
+ "model_params": model_params,
+ "library": library,
+ }
+ return None
+
+ def extract_subclass_data(self, package_name: str, base_class_name: str):
"""
Return a dict mapping `.` → class object
for every class in `package_name` that subclasses a class named
@@ -49,6 +89,7 @@ def subclasses_of(self, package_name: str, base_class_name: str):
iterates over the module's `__dict__` which contains only
attributes that have already been imported.
"""
+ from pkgutil import walk_packages
results = {}
root_pkg = import_module(package_name)
@@ -58,11 +99,9 @@ def subclasses_of(self, package_name: str, base_class_name: str):
except (ImportError, ModuleNotFoundError, RuntimeError):
continue
- # Iterate over all *already* imported members in the module
for name, obj in module.__dict__.items():
if not isinstance(obj, type):
continue
- # Ensure the class is defined in this module, not imported
if obj.__module__ != mod_name:
continue
try:
@@ -76,28 +115,3 @@ def subclasses_of(self, package_name: str, base_class_name: str):
break
return results
-
- # def extract_model_data(self,pipe_name, file_name: str) -> dict | None:
- # migrated_pipes = MIGRATIONS["migrated_pipes"]
- # pkg_path = f"diffusers.pipelines.{pipe_name}.{file_name}"
- # pipe_file: Callable = import_object_named(file_name, pkg_path) or import_module(pkg_path)
- # if pipe_file and (doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None)): #where pipe class and repo are
- # docstrings= DocStringEntry(package_name=pipe_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
- # DocStringParser(doc_string=docstrings.doc_string)
- # self.parsed_docs.pipe_repo = migrated_pipes.get(self.parsed_docs.pipe_class, self.parsed_docs.pipe_repo)
- # model = import_object_named(parsed_data.pipe_class, docstrings.pipe_module.__name__)
- # model_data = show_init_fields_for(model,"diffusers")
- # return {"model_params": model_data}
-
-
-# for pipe_name in IMPORT_STRUCTURE.keys():
-# if pipe_name not in exclusion_list and (import_name := getattr(diffusers_pipelines, str(pipe_name))):
-# file_specific = uncommon_naming.get(pipe_name, pipe_name)
-# file_names:list[str] = [getattr(import_name, "_import_structure", {})] or [f"pipeline_{file_specific}"]
-# for file_name in file_names:
-# if not file_name in exclusion_list or not (model_data := self.extract_model_data(pipe_name, file_name)):
-# continue
-# if not (prepared_data := PrepareData( **model_data)):
-# continue
-# else:
-# continue
diff --git a/mir/generate/diffusers/index.py b/mir/generate/diffusers/index.py
deleted file mode 100644
index 852fc24..0000000
--- a/mir/generate/diffusers/index.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-import os
-from importlib import import_module
-from typing import Any, Generator
-
-from mir import DBUQ, NFO
-from mir.data import EXCLUSIONS
-from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS
-from mir.generate.from_module import import_object_named, show_init_fields_for, to_domain_tag
-from mir.generate.indexers import migrations
-
-
-def retrieve_diffusers_docstrings(
- package_name: str,
- file_names: list[str],
-) -> Generator[DocStringEntry]:
- """Yield (pkg, file, EXAMPLE_DOC_STRING) from a folder or a single file.\n
- :param pkg_name: Package under ``diffusers.pipelines``.\n
- :param file_names: A list of related file names.\n
- :param use_folder: True → treat ``source`` as a folder with ``_import_structure``.\n
- :return: DocString Entry class.\n
- """
-
- module_location: str | None = import_module("diffusers.pipelines").__file__
- module_path = os.path.dirname(module_location)
-
- for file_name in file_names:
- assert isinstance(file_name, str), f"Expected path to be string, got {file_name} type {type(file_name)}"
- if file_name == "pipeline_stable_diffusion_xl_inpaint":
- continue
-
- pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
- DBUQ(pkg_path)
-
- if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
- pipe_file = import_object_named(file_name, pkg_path) or import_module(pkg_path) or NFO(f"Failed to import {pkg_path}")
- if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
- yield DocStringEntry(package_name=package_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
- else:
- NFO(f"Doc string attribute missing for {package_name}/{file_name}")
- else:
- NFO(f"Path not found for {package_name}/{file_name}")
-
- return
-
-
-def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
- """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
- :param repo_path (str): Repository path.
- :param model_class_obj (str): The model class function
- :raises TypeError: If 'repo_path' or 'class_name' are not set.
- :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
- """
- import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
-
- control_net = ["Control", "Controlnet"] #
- mir_prefix = "info"
- if hasattr(diffusers, class_name):
- model_class_obj = getattr(diffusers, class_name)
- sub_segments = show_init_fields_for(model_class_obj, "diffusers")
- decoder = "decoder" in sub_segments
- if repo_path in ["kandinsky-community/kandinsky-3"]:
- mir_prefix = "info.unet"
- if repo_path in ["openai/shap-e"]:
- mir_prefix = "info.unet"
- class_name = "ShapEPipeline"
- elif class_name == "MotionAdapter":
- mir_prefix = "info.lora"
- elif class_name == "WanPipeline":
- mir_prefix = "info.dit"
- elif class_name == "CogVideoXVideoToVideoPipeline":
- class_name = "CogVideoXPipeline"
- elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
- mir_prefix = "info.controlnet"
- else:
- mir_prefix = to_domain_tag(**sub_segments)
- if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
- NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
- else:
- mir_prefix = "info." + mir_prefix
- if class_name == "StableDiffusion3InpaintPipeline" or repo_path in ["stabilityai/stable-diffusion-3-medium-diffusers"]:
- class_name = "StableDiffusion3Pipeline"
- repo_path = "stabilityai/stable-diffusion-3.5-medium"
- if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
- class_name = "HunyuanVideoPipeline"
- mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
- mir_series = mir_prefix + "." + mir_series
- repo_path = migrations(repo_path)
- # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
- prefixed_data = {
- "repo": repo_path,
- "pkg": {0: {"diffusers": class_name}},
- # "mode": modalities.get("mode"),
- }
- return mir_series, {mir_comp: prefixed_data}
-
-
-def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
- """Convert model repo pipes to MIR tags, classifying by feature\n
- :param name: Repo path
- :param class_name: The HF Diffusers class for the model
- :return: A segmented MIR tag useful for appending index entries"""
- mir_series, mir_data = create_pipe_entry(repo_path=repo_path, class_name=class_name)
- mir_prefix, mir_series = mir_series.rsplit(".", 1)
- mir_comp = list(mir_data)[0]
- return mir_prefix, mir_series, {mir_comp: addendum}
-
-
-def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
- """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
- :return: Docstrings for common diffusers models"""
- import diffusers.pipelines as diffusers_pipelines
-
- docstring_patterns = EXCLUSIONS
- exclusion_list = docstring_patterns["exclusion_list"]
- uncommon_naming = docstring_patterns["uncommon_naming"]
- for pipe_name in IMPORT_STRUCTURE.keys():
- if pipe_name not in exclusion_list:
- file_specific = uncommon_naming.get(pipe_name, pipe_name)
- if import_name := getattr(diffusers_pipelines, str(pipe_name)):
- file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
- yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
- else:
- continue
-
-
-def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
- """Generate diffusion model data for MIR index\n
- :return: Dictionary ready to be applied to MIR data fields
- """
- special_repos = {
- "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-dev",
- # "stabilityai/stable-diffusion-3-medium-diffusers": "stabilityai/stable-diffusion-3.5-medium",
- }
- special_classes = {
- # "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium", # NOT sd3
- "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
- "ChromaPipeline": "lodestones/Chroma",
- }
- for class_name, swap_repo in special_classes.items():
- if parsed_data.pipe_class == class_name:
- parsed_data.pipe_repo = swap_repo
- extracted_docstrings = find_diffusers_docstrings()
- model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
- pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
-
- for extracted in model_info:
- parsed_data: DocParseData = parse_docs(extracted.doc_string)
- if parsed_data is None:
- print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
- continue
- for class_name, swap_repo in special_classes.items():
- if parsed_data.pipe_class == class_name:
- parsed_data.pipe_repo = swap_repo
- break
- model_class_obj = import_object_named(parsed_data.pipe_class, extracted.pipe_module.__name__)
- if not model_class_obj:
- continue
- try:
- series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
- except TypeError:
- pass # Attempt 1
- if pipe_data.get(series):
- if "img2img" in parsed_data.pipe_class.lower():
- continue
- pipe_data.setdefault(series, {}).update(comp_data)
- special_conditions = special_repos | special_classes
- if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
- test = special_conditions.get(parsed_data.pipe_repo)
- if test:
- staged_repo = test
- parsed_data.staged_class = parsed_data.pipe_class
- try:
- series, comp_data = create_pipe_entry(
- staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
- parsed_data.staged_class #
- if parsed_data.staged_class
- else parsed_data.pipe_class,
- )
- except TypeError as error_log:
- NFO(series, comp_data)
- NFO(error_log)
- continue # Attempt 2,
- pipe_data.setdefault(series, {}).update(comp_data)
- return dict(pipe_data)
-
-
-# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
-# from nnll.download.hub_cache import download_hub_file
-
-# model_file = download_hub_file(
-# repo_id=f"{repo_id}/tree/main/{arch}",
-# source="huggingface",
-# file_name="diffusion_pytorch_model.safetensors.index.json",
-# local_dir=".tmp",
-# )
-
-
-# @MODE_DATA.decorator
-# def add_mode_types(mir_tag: list[str], data: dict | None = None) -> dict[str, list[str] | str]:
-# """_summary_\n
-# :param mir_tag: _description_
-# :param data: _description_, defaults to None
-# :return: _description_"""
-# fused_tag = ".".join(mir_tag)
-
-# mir_details = {
-# "mode": data.get(fused_tag, {}).get("pipeline_tag"),
-# "pkg_type": data.get(fused_tag, {}).get("library_type"),
-# "tags": data.get(fused_tag, {}).get("tags"),
-# }
-# return mir_details
diff --git a/mir/generate/diffusers/raw_data.py b/mir/generate/diffusers/raw_data.py
index 3e37836..e86dbfb 100644
--- a/mir/generate/diffusers/raw_data.py
+++ b/mir/generate/diffusers/raw_data.py
@@ -8,29 +8,23 @@
@dataclass
class DPrepareData:
- name: str
doc_string: str
model: Callable
model_path: str
+ library: str
+ model_name: str
+ model_params: dict[str, list[str]] = field(init=True, default_factory=lambda: {"": [""]})
repo_path: str = field(init=False, default_factory=str)
- model_name: str = field(init=False, default_factory=str)
- staged_repo: str | None = field(init=False, default_factory=str | None)
+    staged_repo: str | None = field(init=False, default=None)  # None until the doc parser finds a staged repo; default_factory=str produced "" and contradicted the annotation
tasks: list[str] = field(init=False, default_factory=lambda: [""])
-
- def __init__(self, **kwargs) -> None:
- for key, value in kwargs.items():
- setattr(self, key, value)
+ name: str = field(init=False, default_factory=str)
def __post_init__(self) -> None:
from mir.data import MIGRATIONS
from mir.generate.diffusers.doc_parse import DocStringParser
- from mir.generate.from_module import show_init_fields_for
- self.model_name = self.model.__name__
- self.library = self.model.__module__.split(".", 1)[0]
- self.model_params = show_init_fields_for(self.model, "diffusers")
- self.type_params = get_type_hints(self.model.__init__)
- doc_parser = DocStringParser(self.doc_string, self.model)
+ doc_parser = DocStringParser(doc_string=self.doc_string, model=self.model, model_path=self.model_path)
+ doc_parser.parse()
if repo_path := MIGRATIONS["migrated_pipes"].get(self.model.__name__, False):
self.repo_path = repo_path
else:
@@ -38,22 +32,26 @@ def __post_init__(self) -> None:
self.repo_path = repo_path
if staged_repo := doc_parser.staged_repo:
self.staged_repo = staged_repo
+ self.show_diffusers_tasks()
+ for name, model in self.model_params.items():
+ setattr(self, name, model)
+            # components are now reachable as attributes for package assembly in HarvestClasses
- def show_diffusers_tasks(self) -> list[str]:
+ def show_diffusers_tasks(self) -> None:
"""Return Diffusers task pipes based on package-specific query\n
:param class_name: To find task pipes from a Diffusers class pipe, defaults to None
:param code_name: To find task pipes from a Transformers class pipe, defaults to None
:return: A list of alternate class pipelines derived from the specified class"""
from mir.generate.diffusers import SUPPORTED_TASKS_MAPPINGS, GET_TASK_CLASS
- alt_tasks = set()
- internal_name = self.model_path.rsplit(".", 2)[-2]
+ alt_tasks = set({})
+ self.internal_name = self.model_path.rsplit(".", 2)[-1]
for task_map in SUPPORTED_TASKS_MAPPINGS:
task_class = GET_TASK_CLASS(task_map, self.model, False)
if task_class:
alt_tasks.add(task_class.__name__)
for model_code, pipe_class_obj in task_map.items():
- if internal_name in model_code:
+ if self.internal_name in model_code:
alt_tasks.add(pipe_class_obj.__name__)
- return list(alt_tasks)
+ self.tasks = [x for x in alt_tasks]
diff --git a/mir/generate/from_module.py b/mir/generate/from_module.py
index a39778f..fffb820 100644
--- a/mir/generate/from_module.py
+++ b/mir/generate/from_module.py
@@ -4,9 +4,22 @@
# 模块发现和解构
import inspect
-import os
+
from importlib import import_module
-from typing import Callable, Type
+from typing import Callable
+
+
+def migrations(repo_path: str):
+ """Replaces old organization names in repository paths with new ones.\n
+ :param repo_path: Original repository path containing old organization names
+ :return: Updated repository path with new organization names"""
+ from mir.data import MIGRATIONS
+
+ repo_migrations = MIGRATIONS
+ for old_name, new_name in repo_migrations.items():
+ if old_name in repo_path:
+ repo_path = repo_path.replace(old_name, new_name)
+ return repo_path
def import_object_named(module: str, pkg_name_or_abs_path: str) -> Callable | None:
@@ -57,63 +70,3 @@ def show_init_fields_for(module: Callable | str, package_name: str | None = None
class_names = dict(class_names)
return class_names
-
-
-def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param code_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
-
- pkg_paths = {
- "diffusers": "pipelines",
- "transformers": "models",
- }
- folder_name = code_name.replace("-", "_")
- pkg_name = pkg_name.lower()
- folder_path = pkg_paths[pkg_name]
- package_obj = import_module(pkg_name)
- folder_path_named = [folder_path, folder_name]
- pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
- # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
- if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
- import_path = [pkg_name]
- import_path.extend(folder_path_named)
- return import_path
-
-
-# def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
-# """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
-# :param class_name: To return only one class, defaults to None
-# :param pkg_name: optional field for library, defaults to "transformers"
-# :param path_format: Retrieve just the code name, or the full module path and code name within the package
-# :return: A list of all code names, or the one corresponding to the provided class"""
-# from mir.generate.diffusers import IMPORT_STRUCTURE
-# from mir.generate.transformers import MODEL_MAPPING_NAMES
-
-# package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
-# pkg_name = pkg_name.lower()
-# MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
-# if module_name:
-# if isinstance(module_name, Type):
-# module_name = module_name.__name__
-# code_name = next(iter(key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
-# return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
-# return list(MAPPING_NAMES)
-
-
-def to_domain_tag(library: str, **kwargs):
- """Set type of MIR prefix depending on model type\n
- :param transformers: Use transformers data instead of diffusers data, defaults to False
- :raises ValueError: Model type not detected
- :return: MIR prefix based on model configuration"""
- from mir.data import NN_FILTER
-
- flags = NN_FILTER["arch"][library] # pylint:disable=unsubscriptable-object
- for mir_prefix, key_match in flags.items():
- if any(kwargs.get(param, None) for param in key_match):
- return mir_prefix
- return None
diff --git a/mir/generate/indexers.py b/mir/generate/indexers.py
deleted file mode 100644
index 51f755a..0000000
--- a/mir/generate/indexers.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-"""類發現和拆卸"""
-# pylint:disable=no-name-in-module
-
-from mir import NFO
-from mir.data import MIGRATIONS
-from mir.maid import MIRDatabase
-from mir.spec import mir_entry
-
-
-def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
- """Generate MIR HF Hub model database
- :param new_data: Data for the MIR database
- :param mir_database: MIRDatabase instance
- """
- for series, comp_name in new_data.items():
- id_segment = series.split(".")
- for compatibility in comp_name:
- # dbug(id_segment)
- try:
- mir_db.add(
- mir_entry(
- domain=id_segment[0],
- arch=id_segment[1],
- series=id_segment[2],
- comp=compatibility,
- **new_data[series][compatibility],
- ),
- )
- except IndexError: # as error_log:
- NFO(f"Failed to create series: {series} compatibility: {comp_name} ")
- # dbug(error_log)
-
-
-def migrations(repo_path: str):
- """Replaces old organization names in repository paths with new ones.\n
- :param repo_path: Original repository path containing old organization names
- :return: Updated repository path with new organization names"""
-
- repo_migrations = MIGRATIONS
- for old_name, new_name in repo_migrations.items():
- if old_name in repo_path:
- repo_path = repo_path.replace(old_name, new_name)
- return repo_path
diff --git a/mir/generate/mlx/index.py b/mir/generate/mlx/harvest.py
similarity index 100%
rename from mir/generate/mlx/index.py
rename to mir/generate/mlx/harvest.py
diff --git a/mir/generate/transformers/__init__.py b/mir/generate/transformers/__init__.py
index 9eaedd4..7cd0886 100644
--- a/mir/generate/transformers/__init__.py
+++ b/mir/generate/transformers/__init__.py
@@ -10,7 +10,5 @@
)
from transformers.models.auto.tokenization_auto import TOKENIZER_MAPPING
-from mir.generate.transformers.harvest import HarvestClasses
-
AUTO_MAP = AutoModel._model_mapping
REVERSE_MAP = AUTO_MAP._reverse_config_mapping
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index 7c33dc5..90de8f6 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -3,7 +3,7 @@
from typing import Any, Callable
-from mir.framework import MIRNesting, MIRPackage
+from mir.package import MIRNesting, MIRPackage
from mir.generate.transformers.raw_data import PrepareData
from mir.tag import MIRTag
@@ -14,7 +14,6 @@ def __init__(self) -> None:
from mir.maid import MIRDatabase
self.db = MIRDatabase()
- self.raw_data = []
self.find_transformers_classes()
def find_transformers_classes(self) -> None:
@@ -22,7 +21,7 @@ def find_transformers_classes(self) -> None:
:return: List of PrepareData entries representing the transformer classes."""
from mir.generate.transformers import AUTO_MAP
- for config_class, model_class in AUTO_MAP.items(): #type: ignore
+ for config_class, model_class in AUTO_MAP.items(): # type: ignore
if isinstance(model_class, tuple):
model_class: Callable = model_class[0]
if not (config_data := self.extract_config_class_data(config_class)):
@@ -34,13 +33,12 @@ def find_transformers_classes(self) -> None:
mir_tag = MIRTag(prepared_data)
mir_nest = MIRNesting(mir_tag, prepared_data)
- packages = [MIRPackage(data=prepared_data.model)]
- if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
- packages.append(MIRPackage(data=prepared_data.tokenizer)) #type: ignore , _Lazyautomapping tuple
- packages.append(MIRPackage(data=mir_nest.framework_data))
- for pkg in packages:
- mir_nest(pkg)
+ packages = {"model": MIRPackage(data=prepared_data.model)}
+ if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
+ packages.setdefault("tokenizer", MIRPackage(data=prepared_data.tokenizer)) # type: ignore , _Lazyautomapping tuple
+ packages.setdefault("framework", MIRPackage(data=mir_nest.framework_data))
+ mir_nest(packages)
self.db.add_data(mir_nest, *mir_nest.loops)
diff --git a/mir/maid.py b/mir/maid.py
index 5f0111d..14ef49f 100644
--- a/mir/maid.py
+++ b/mir/maid.py
@@ -8,9 +8,8 @@
from typing import Any, List, Optional
from mir import MIR_PATH_NAMED
-from mir.framework import MIRNesting
+from mir.package import MIRNesting
from mir.json_io import read_json_file, write_json_file
-from mir.tag import MIRTag
class MIRDatabase:
diff --git a/mir/package.py b/mir/package.py
index e69de29..97187e9 100644
--- a/mir/package.py
+++ b/mir/package.py
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from typing import Any, Callable
+from dataclasses import dataclass, field
+from mir.generate.diffusers.raw_data import DPrepareData
+from mir.generate.transformers.raw_data import PrepareData
+from mir.tag import MIRTag
+
+
+@dataclass
+class MIRPackage:
+ data: Callable | str | dict[str, str]
+ package: dict[str, str] = field(init=False, default_factory=dict[str, str])
+
+ def __init__(self, data: Callable | str | dict[str, str] | dict[str, Any]):
+ self.package = {}
+ self.data = data
+ if not isinstance(self.data, dict):
+ self.generate_package()
+ else:
+ self.add_framework(self.data)
+
+ def generate_package(self) -> None:
+ """Generates package information for the MIR tag based on class.
+ :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
+ self.domain = "ops"
+ model = f"{self.data.__module__}.{self.data.__name__}"
+ self.package: dict[str, str] = {"model": model}
+
+ def add_framework(self, framework_data) -> None:
+ self.domain = "info"
+ self.package = framework_data
+
+
+class MIRNesting:
+ """Build tag components from the extracted data\n
+ :param mir_tag: An instance of MIR tag with the necessary information
+ :param prepared_data: Instance of PrepareData to attribute the final information
+ :returns: The final, assembled MIR tag"""
+
+ loops: list[str]
+ framework_data: dict[str, str | dict[str, Any]] = {}
+    repo: str | None = None  # field(default_factory=str | None) was invalid: a union type is not callable, and MIRNesting is not a dataclass so field() objects would leak as attribute values
+ framework: dict[str, str] = field(init=False)
+ tokenizer: str | None = field(default_factory=str)
+
+ def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData | DPrepareData) -> None:
+ """\nInitialize the framework with MIR tag and prepared data.\n
+ :param mir_tag : The MIR tag instance.
+ :param prepared_data : The prepared data for processing."""
+ self.mir_tag = mir_tag
+
+ self.prepared_data = prepared_data
+ self.loops = []
+ self.framework_data = {}
+
+ def __call__(self, packages: dict[str, MIRPackage]) -> None:
+ """Common routine for handling a package: store tag data, nest the package,
+ and record the name of the newly-created attribute.\n
+ :param name: Identification string to store data underneath
+ :param mir_package: An instance of MIRPackage with the requisite data"""
+
+ for name, mir_package in packages.items():
+ is_framework = name == "framework"
+ is_model = name == "model"
+
+ if is_framework:
+ package_data = {self.prepared_data.library: mir_package.package}
+ tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
+ if comp := getattr(self.mir_tag, "comp", None):
+                    tag_data += f".{comp}"  # dot separator so nest_data's split(".") sees comp as a distinct 4th segment
+ self.framework_data.setdefault("repo", self.prepared_data.repo_path)
+ elif is_model:
+ package_data = {self.prepared_data.library: mir_package.package}
+ if hasattr(self.prepared_data, "tasks") and self.prepared_data.tasks:
+ package_data[self.prepared_data.library].setdefault("tasks", self.prepared_data.tasks)
+ tag_data = f"{mir_package.domain}.{self.mir_tag.arch}.{self.mir_tag.series}"
+ if comp := getattr(self.mir_tag, "comp", None):
+                    tag_data += f".{comp}"  # dot separator so nest_data's split(".") sees comp as a distinct 4th segment
+ self.framework_data.setdefault(name, tag_data)
+            elif name == "tokenizer":  # tokenizer component; `is_tokenizer` was never defined and raised NameError here
+                package_data = {self.prepared_data.library: mir_package.package}
+                tag_data = f"{mir_package.domain}.encoder.tokenizer.{self.mir_tag.series}"
+                self.framework_data.setdefault(name, tag_data)
+            else: continue  # unknown component (e.g. vae, text_encoder): no tag_data/package_data to nest
+ self.nest_data(name=name, tag_data=tag_data, package_data=package_data)
+ self.loops.append(name)
+
+ def nest_data(self, name: str, tag_data: str, package_data: dict) -> None:
+ """Nest data into a hierarchical attribute structure.\n
+ :param name: Attribute name to store the nested data
+ :param tag_data: Dotted path string for nesting
+ :param package_data: Data to be stored in the nested structure"""
+
+ from chanfig import NestedDict
+
+ tag_parts = tuple(x for x in tag_data.split("."))
+
+ if len(tag_parts) == 4:
+ domain, arch, series, comp = tag_parts
+ nest = NestedDict({f"{domain}.{arch}.{series}": {comp: ""}})
+ nest[domain][arch][series][comp] = package_data
+ else:
+ domain, arch, series = tag_parts
+ nest = NestedDict({f"{domain}.{arch}": {series: ""}})
+ nest[domain][arch][series] = package_data
+
+ setattr(self, name, nest)
diff --git a/mir/tag.py b/mir/tag.py
index 3df4200..82f7b8b 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -31,26 +31,19 @@ def __post_init__(self) -> None:
def generate_arch(self) -> None:
"""Generates the architecture part of the MIR tag based on prepared data.\n
:raises ValueError: If no suitable tag can be determined."""
- from mir.generate.from_module import to_domain_tag
+ arch = None
library = self.raw_data.model.__module__.split(".")[0]
if hasattr(self.raw_data, "config_params"):
- arch = to_domain_tag(library, **self.raw_data.config_params) # type: ignore
- else:
+ arch = self.tag_architecture(library, **self.raw_data.config_params) # type: ignore
+ elif hasattr(self.raw_data, "model_params"):
arch = None
self.decoder = "decoder" in [self.raw_data.model_params]
+ arch = self.tag_architecture(library, **self.raw_data.model_params) # type: ignore
if not arch:
- if self.raw_data.model_params:
- if arch := to_domain_tag(library, **self.raw_data.model_params):
- pass
- raise ValueError(f"Unable to determine MIR prefix from {self}")
- else:
- raise ValueError(
- f"Unrecognized model type, \
- no tag matched {self.raw_data.name} \
- with {self.raw_data}",
- )
- self.arch = arch
+ print(f"Unrecognized model type, no tag matched {self.raw_data.name} with {self.raw_data.model_name}")
+ else:
+ self.arch = arch
def generate_series_and_comp(self, repo_path: str, decoder=decoder) -> None:
"""Generates the MIR tag components from a repository title.\n
@@ -93,33 +86,57 @@ def generate_series_and_comp(self, repo_path: str, decoder=decoder) -> None:
if suffix != "*":
self.comp = suffix
- # def generate_pipe_tag(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
- # """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
- # :param repo_path (str): Repository path.
- # :param model_class_obj (str): The model class function
- # :raises TypeError: If 'repo_path' or 'class_name' are not set.
- # :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
- # """
- # import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
-
- # if hasattr(diffusers, class_name):
- # model_class_obj = getattr(diffusers, class_name)
- # sub_segments = show_init_fields_for(model_class_obj, "diffusers")
-
- # else:
- # mir_prefix = to_domain_tag(**sub_segments)
- # if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
- # NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
- # else:
- # mir_prefix = "info." + mir_prefix
-
- # mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
- # mir_series = mir_prefix + "." + mir_series
- # repo_path = migrations(repo_path)
- # # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
- # prefixed_data = {
- # "repo": repo_path,
- # "pkg": {0: {"diffusers": class_name}},
- # # "mode": modalities.get("mode"),
- # }
- # return mir_series, {mir_comp: prefixed_data}
+ def tag_architecture(self, library: str, **kwargs) -> str | None:
+ """Set type of MIR prefix depending on model type\n
+ :param library: Library source of the original data
+ :raises ValueError: Model type not detected
+ :return: MIR prefix based on model configuration"""
+ from mir.data import NN_FILTER
+
+ flags = NN_FILTER["arch"][library] # pylint:disable=unsubscriptable-object
+ if library == "diffusers":
+ for module_type, module_obj in kwargs.items():
+ module_name = module_obj.__module__
+ library_path = f"{library}.models."
+ if library_path in module_name:
+ module_name = module_name.replace(library_path, "").split(".")[0]
+ if mir_prefix := [match for match in flags if module_name in flags[match]]:
+ return mir_prefix[0]
+ for mir_prefix, key_match in flags.items():
+ if any(kwargs.get(param, None) for param in key_match):
+ return mir_prefix
+ return None
+
+
+def tag_scheduler(self, scheduler_name: str) -> tuple[str, str]:
+ """Create a mir label from a scheduler operation\n
+ :param class_name: Known period-separated prefix and model type
+ :return: The assembled mir tag with compatibility pre-separated"""
+ import re
+
+ series_name = None
+ comp_name = None
+ patterns = [r"Schedulers", r"Multistep", r"Solver", r"Discrete", r"Scheduler"]
+ for scheduler in patterns:
+ compiled = re.compile(scheduler)
+ match = re.search(compiled, scheduler_name)
+ if match:
+ comp_name = match.group()
+ comp_name = comp_name.lower()
+ break
+ for pattern in patterns:
+ series_name = re.sub(pattern, "", scheduler_name)
+ if not series_name:
+ series_name = scheduler_name
+ series_name.lower()
+ assert series_name is not None, "Expected series tag but got None"
+ assert comp_name is not None, "Expected compatibility tag but got None"
+ return series_name, comp_name
+
+
+def tag_tokenizer():
+ pass
+
+
+def tag_tokenizer():
+ pass
diff --git a/pyproject.toml b/pyproject.toml
index 7d95cc5..0b71665 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -70,3 +70,8 @@ dev = [
[tool.ruff.lint]
ignore = ["E731"]
+
+[tool.pytest.ini_options]
+filterwarnings = [
+ "ignore::DeprecationWarning",
+]
\ No newline at end of file
diff --git a/tests/test_mir_generate_diffusers.py b/tests/test_mir_generate_diffusers.py
new file mode 100644
index 0000000..2db66d0
--- /dev/null
+++ b/tests/test_mir_generate_diffusers.py
@@ -0,0 +1,6 @@
+def test_info_key_exists_and_library_is_not_nested():
+ from mir.generate.diffusers.harvest import HarvestClasses
+
+ Mir = HarvestClasses().db.db
+
+ # print(Mir)
diff --git a/tests/test_mir_generate.py b/tests/test_mir_generate_transformers.py
similarity index 69%
rename from tests/test_mir_generate.py
rename to tests/test_mir_generate_transformers.py
index 211790d..2fd0a11 100644
--- a/tests/test_mir_generate.py
+++ b/tests/test_mir_generate_transformers.py
@@ -1,5 +1,7 @@
def test_info_key_exists_and_library_is_not_nested():
- from mir import Mir
+ from mir.generate.transformers.harvest import HarvestClasses
+
+ Mir = HarvestClasses().db.db
print(Mir.info.cnn.yolos)
result = Mir.info.cnn.yolos["transformers"] # should not throw
@@ -7,7 +9,9 @@ def test_info_key_exists_and_library_is_not_nested():
def test_ops_key_exists_and_library_is_not_tested():
- from mir import Mir
+ from mir.generate.transformers.harvest import HarvestClasses
+
+ Mir = HarvestClasses().db.db
print(Mir.ops.cnn.yolos)
result = Mir.ops.cnn.yolos["transformers"] # should not throw
@@ -22,7 +26,9 @@ def test_ops_key_exists_and_library_is_not_tested():
def test_ops_tokenizer_created():
- from mir import Mir
+ from mir.generate.transformers.harvest import HarvestClasses
+
+ Mir = HarvestClasses().db.db
- result = Mir.ops.encoder.tokenizer.zamba2['transformers']
+ result = Mir.ops.encoder.tokenizer.zamba2["transformers"]
assert result == {"model": "transformers.models.llama.tokenization_llama.LlamaTokenizer"}
From c45dc1a6231bf41b89f46847575dac8760855b25 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Tue, 20 Jan 2026 18:59:07 -0500
Subject: [PATCH 13/16] ~stepping through progress
---
mir/__init__.py | 4 +-
mir/_deprecated/_automata.py | 1996 -------------------
mir/_deprecated/_extras.py | 242 ---
mir/_deprecated/_guiders.py | 88 -
mir/_deprecated/_index.py | 270 ---
mir/_deprecated/_schedulers.py | 74 -
mir/generate/_tasks.py | 2 +-
mir/generate/diffusers/harvest.py | 94 +-
mir/generate/diffusers/package.py | 69 +
mir/generate/diffusers/raw_data.py | 55 +-
mir/generate/diffusers/tasks.py | 34 +
mir/generate/transformers/harvest.py | 118 +-
mir/generate/transformers/package.py | 56 +
mir/generate/transformers/raw_data.py | 41 +-
mir/generate/transformers/tasks.py | 32 +
mir/package.py | 28 +-
mir/tag.py | 20 +-
tests/old/test_class_parent.py | 35 -
tests/old/test_deconstructors_root.py | 22 -
tests/old/test_doc_parser.py | 143 --
tests/old/test_find_docstring_run.py | 5 -
tests/old/test_gather_diffusers_metadata.py | 49 -
tests/old/test_json_io.py | 42 -
tests/old/test_mir_db_create_restore.py | 160 --
tests/old/test_mir_merge.py | 122 --
tests/old/test_mir_search.py | 98 -
tests/old/test_mir_tagging.py | 44 -
tests/old/test_regex_constants.py | 27 -
tests/old/test_resolve_code_names.py | 44 -
tests/old/test_seek_class.py | 18 -
tests/old/test_task.py | 11 -
tests/old/test_taskanalyzer.py | 320 ---
tests/test_harvest_transformers.py | 6 +
tests/test_inspect.py | 7 +
tests/test_mir_generate_diffusers.py | 4 +-
tests/test_mir_generate_transformers.py | 12 +-
36 files changed, 303 insertions(+), 4089 deletions(-)
delete mode 100644 mir/_deprecated/_automata.py
delete mode 100644 mir/_deprecated/_extras.py
delete mode 100644 mir/_deprecated/_guiders.py
delete mode 100644 mir/_deprecated/_index.py
delete mode 100644 mir/_deprecated/_schedulers.py
create mode 100644 mir/generate/diffusers/package.py
create mode 100644 mir/generate/diffusers/tasks.py
create mode 100644 mir/generate/transformers/package.py
create mode 100644 mir/generate/transformers/tasks.py
delete mode 100644 tests/old/test_class_parent.py
delete mode 100644 tests/old/test_deconstructors_root.py
delete mode 100644 tests/old/test_doc_parser.py
delete mode 100644 tests/old/test_find_docstring_run.py
delete mode 100644 tests/old/test_gather_diffusers_metadata.py
delete mode 100644 tests/old/test_json_io.py
delete mode 100644 tests/old/test_mir_db_create_restore.py
delete mode 100644 tests/old/test_mir_merge.py
delete mode 100644 tests/old/test_mir_search.py
delete mode 100644 tests/old/test_mir_tagging.py
delete mode 100644 tests/old/test_regex_constants.py
delete mode 100644 tests/old/test_resolve_code_names.py
delete mode 100644 tests/old/test_seek_class.py
delete mode 100644 tests/old/test_task.py
delete mode 100644 tests/old/test_taskanalyzer.py
create mode 100644 tests/test_harvest_transformers.py
create mode 100644 tests/test_inspect.py
diff --git a/mir/__init__.py b/mir/__init__.py
index 3405a0f..c43d89b 100644
--- a/mir/__init__.py
+++ b/mir/__init__.py
@@ -22,6 +22,6 @@
# from mir.generate.transformers.harvest import HarvestClasses
# Mir = HarvestClasses().db.db
-from mir.generate.diffusers.harvest import HarvestClasses
+# from mir.generate.diffusers.harvest import HarvestClasses
-Mir = HarvestClasses().db.db
+# Mir = HarvestClasses().db.db
diff --git a/mir/_deprecated/_automata.py b/mir/_deprecated/_automata.py
deleted file mode 100644
index ad5b0c8..0000000
--- a/mir/_deprecated/_automata.py
+++ /dev/null
@@ -1,1996 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-"""自動化索引"""
-# regex to remove \[[^\]]*\]
-# (?m)^\s*"[^"]+"(?=\s*:)
-# (?m)^\s*"[^"]+"\s?:
-# modelspec sai https://github.com/Stability-AI/ModelSpec
-
-from importlib import import_module
-import re
-from typing import Dict, List, Tuple, Any
-
-import torch
-
-from mir.indexers import diffusers_index, transformers_index
-from mir.maid import MIRDatabase
-from mir.spec import mir_entry
-from mir.tag import tag_model_from_repo, tag_scheduler, tag_base_model, tag_pipe
-
-
-sd1_series, sd1_comp = tag_model_from_repo("stable-diffusion-v1-5/stable-diffusion-v1-5")
-sdxl_series, sdxl_comp = tag_model_from_repo("stabilityai/stable-diffusion-xl-base-1.0")
-dev_series, dev_comp = tag_model_from_repo("black-forest-labs/FLUX.1-dev")
-schnell_series, schnell_comp = tag_model_from_repo("black-forest-labs/FLUX.1-schnell")
-ssd_series, ssd_comp = tag_model_from_repo("segmind/SSD-1B")
-vega_series, vega_comp = tag_model_from_repo("segmind/Segmind-Vega")
-sd3_series, sd3_comp = tag_model_from_repo("stable-diffusion-3.5-medium") #
-
-
-# def auto_gan etc etc
-# ai-forever/Real-ESRGAN
-
-
-def add_mir_diffusion(mir_db: MIRDatabase):
- """Create MIR entries missing from the database"""
-
- repo = "microsoft/speecht5_hifigan"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="gan",
- series=series,
- comp=comp,
- file_256=[
- "d9dc6513c30a5b86c2497712690c04fe74b4aa79fdab6d490b34fcb4e24c590c",
- ],
- layer_b3=[
- "85b5acdf29ad04c63f885383340d8e3445ae0055521f82cabb82bd09cfb9a956",
- ],
- layer_256=[
- "bd52b538e7ac05711be9321cfb7619d4056996ce32923c9c91ee02cf69154770",
- ],
- )
- )
- series, comp = tag_model_from_repo("lodestones/Chroma")
- repo = "lodestones/Chroma1-HD"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- "0": {
- # "diffusers": "ChromaPipeline",
- "generation": {
- "num_inference_steps": 40,
- # "guidance_scale": 3.0,
- # "num_images_per_prompt": 1,
- },
- }
- },
- file_256=[
- "d845553f11e6afe8139c41ca73678f9f03eab2e68d2e1c6f03ae19509a4d546", # sai
- "1b2993a44e63b2250496f69edce643bac2fb79833cf92ba8dd95cbd764d970c7", # annealed sai
- "2dd46f08516246df1f582047cc09268ce4f747357baff05b13148e71519029fc", # diffusers
- ],
- # layer_b3=[
- # "8da38c3719e77a38a20356c9f92f5ca0101c17406d7a9817323cf67b74088520", # diffusers
- # ],
- # layer_256=[
- # "267798815e0855c2253061c6a6ab70edf9590e8ea1ba9b4621eeb0f6615ee37b",
- # ],
- )
- )
- repo = "lodestones/Chroma1-Flash"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- "0": {
- "diffusers": "ChromaPipeline",
- "generation": {
- "num_inference_steps": 8,
- "guidance_scale": 1.0,
- "num_images_per_prompt": 1,
- },
- },
- },
- file_256=[
- "2c0c7d908d04418a48b453c293237a9826d54472cf0ba76e28697d1309d1021b", # sai
- "c88f6794753ba23e8f6bf8c84cf220daa35a6aa16d54ea0c3e0136f52e5da7e1", # sai delta
- "c759d67ca3ef50a9a1c242e3291c57f406646f226a95f43f66577996494986db", # diffusers
- ],
- # layer_b3= [""],
- # "layer_256"= [""],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp="pony-diffusion",
- file_256=["67ab2fd8ec439a89b3fedb15cc65f54336af163c7eb5e4f2acc98f090a29b0b3"],
- layer_b3=["bf4c2154daa4ece7292277b210d081f98759e9ed4d5c889564632e3ccc4a1071"],
- layer_256=["465425d4420dcf5aa4b4d5b456db11a1fcc7c8f61b2e4a87e2470297c98bb96e"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp="pony-diffusion-turbo",
- file_256=[
- "7555ac941f3a767833830ba5cc9a4508a9777cbf97b487b6baf0400ab7000587", # turbomerge
- "9322f9d91b28abf09e4137bc02ec806af23510221a164e71b81778e61cc3b4b2", # turbosimple
- ],
- layer_b3=[
- "1e8f23fcd4be0f00eb52368b91c709fffa8a3b8e21772b92b2e0671eed9117d0",
- "5c8b3f34f9d0a58135cf72fbfe9b5d75b5545a10e3d726478543fa7cc510a8bc",
- ],
- layer_256=[
- "7edf51ef09b39c46937a4e4141707c040cd12af0d95299a4d3cd2b7d3fabe035",
- "74e4dbc89d57d61ff7e8af8b0fddcf7466ba233d53ca4ffb7777138991bc3d52",
- ],
- )
- )
- repo = "cagliostrolab/animagine-xl-4.0"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "8ece83aa1bed1fb39a2b81f1660f0ce6889218e493c1f2ed55e9f15f59a7e03f", # v4
- "6327eca98bfb6538dd7a4edce22484a1bbc57a8cff6b11d075d40da1afb847ac", # v4 opt
- "1449e5b0b9de87b0f414c5f29cb11ce3b3dc61fa2b320e784c9441720bf7b766", # v3
- "e3c47aedb06418c6c331443cd89f2b3b3b34b7ed2102a3d4c4408a8d35aad6b0", # v3.1
- ],
- layer_b3=[
- "268ffbb120670b9c4b25158bd474c787740884b7738b48203aa03c4c3f00028f",
- "18fda1a55cad137d62c81d4328f5ece85d88b126261e06b9e14ab68055d5d484",
- "bae9bc8a5c43145bcf92ee3391618d9eaddd689f626991bae202de9cf5f1e70e",
- "d6bc5ccafa2b97c867b13a1e7a8c2c7ad9c4877055a66c71bb773557bc306447",
- ],
- layer_256=[
- "c21d1c38813e078817122e12866ab39f5aa7f56945dd4a8beee3cae1e0f139e7",
- "b916c162c981155aaf74e93d5314038af6767bb5a129c51ee05a1fb6a206c6ac",
- "ecc6bfc73824a2d7c3b0ca184854a235859f329c83768f017b07a19a535d17b4",
- "97f6ca05de7fbdae7aacb2427a552f924492176c474a23dd252c192e1c0e9d65",
- ],
- )
- )
- repo = "OnomaAIResearch/Illustrious-XL-v2.0"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "c2a1a3eaa13d4c107dc7e00c3fe830cab427aa026362740ea094745b3422a331", # v2
- "536863e9f0c13b0ce834e2f8a19ada425ee4f722c0ad3d0051ec7e6adaa8156c", # 1.1
- "3e15ba00387db678ab4a099f75771c4f5ac67fda9e7100a01d263eaf30145aa9", # 0.1
- "e3d12d0f76d61aa31d2668a2217e5b642592193f2946842c44d7056ea5469cce", # 0.1 guided
- "735cf3fefcbdc4f7817f53247e38b836ffd27c7641af6d8daa21d245242cb4bd", # 1.0
- ],
- layer_b3=[
- "93b061baf21d743d592327a61f027d099d8e18da9808a76c7704ad123eba4a29",
- "dc05fed2acbc73cef4c377cfa2a681c5cf6d065b88d8bf70d371bbcce6a223a8",
- "8eb1c30327e5b71b35b9a4513dc5f2cac9f244667393c0eedb10a26aa9991cd8",
- "3dafbe31f6ebaffa3d054e1b37049e1147faa2474ceb6dab7bc3c4cded0c845e",
- "892533778ee14454938f7b50830093f58e12f1e14560a148f71927e4ccff5f5c",
- ],
- layer_256=[
- "397791b3d77affb7bd35c5ded7377493c6bf456920a41388ba95bd0157109803",
- "b23c02b8519c6777a1f271662f4251a59468c4b3e11184a2d722fa8929b4ea48",
- "a373981494f5508c124a1960bdd096bbc96935fbb54b1218f563206d3892c176",
- "b709df257c40d9d981f686f2880bbe64f43b78805b7213768d659a142a593efd",
- "f1e6b4cab0fce608dca6fa851384e8728202449f16270fbd1f0c4c5ec4946c10",
- ],
- )
- )
- repo = "playgroundai/playground-v2.5-1024px-aesthetic"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "11b6d7bce65674659cc6b7ea960658436edfd80e566cb240ebd4bfbc3e2076c8", # 2.5 diffusers
- "bcaa7dd6780974f000b17b5a6c63e6f867a75c51ffa85c67d6b196882c69b992", # 2.5 aes sai fp16
- "956dca99114aaa5c3eb526381309d37ee96737e78ed64c8ae613409f47c3f65a", # 2.5 aes sai
- "933778ce76c1fc0ca918b37e1488411b8a99bbd3279c12f527a3ac995a340864", # 2.5 fp16 diffusers
- "5c7d38880d0940e6795158b7608ccef89217272b1f2a9331c5b0a2adffcd82c4", # v2 sai
- "0411e988479884b1a3ecd184123efe38d051d8d0ef24270585a7d1d57499464a", # v2 sai fp16
- ],
- layer_b3=[
- "d55b22740da2d5b98020ad2390cdc0a7ee08cf9e0d98c11957f16cc20c49815b", # 2.5 diffusers
- "7e9be9bd9a3aed1ad7207e2f77c98c24c3a75f6adcc9b53514033c6c3365d289", # 2.5 aes sai fp16
- "5c6dfcc8d01dfb64723f8f5785caa080e2987859c0a050470bfdbe5312be9efc", # 2.5 aes sai
- "703f775c6e48ed5b0eba6e847414f047bcd4adc677dbc1bf221b3ef05b2ac471", # 2.5 diffusers fp16
- "72d4ebe4af61f8a7add8fe36b8acd16602894279fb5a744ad50b5b5bac7067b8", # v2 sai
- "acb757b851db12cdf9d4365a45ee0d6e64afa77ac95583bb82711baf7c4125fd", # v2 sai fp16
- ],
- layer_256=[
- "adb7be228d4ee6e583c3e5ae4ddb579fef64c3987617ce4d4aff3eb7f8d6a3f7",
- "d4813e9f984aa76cb4ac9bf0972d55442923292d276e97e95cb2f49a57227843", # 2.5 aes sai fp16
- "fe2e9edf7e3923a80e64c2552139d8bae926cc3b028ca4773573a6ba60e67c20",
- "bc7021473a04a6de3fe0d0fed600875d852ad1ad9d47c445278f66ce9e8ec7a0" # 2.5 fp16 diffusers
- "fc94481f0c52b21c5ac1fdade8d9c5b210f7239253f86ef21e6198fe393ed60e", # v2 sai
- "a6f31493ceeb51c88c5239188b9078dc64ba66d3fc5958ad48c119115b06120c", # v2 sai fp16
- ],
- pkg={
- 0: {
- "diffusers": "DiffusionPipeline",
- "precision": "ops.precision.float.F16",
- "generation": {"num_inference_steps": 50, "guidance_scale": 3},
- }
- },
- identifiers=[
- "edm_mean",
- [1, 4, 1, 1],
- 2516,
- ],
- )
- )
- repo = "segmind/Segmind-Vega"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "94762e983e5942056be73c5c1d4464b8ffa1ada500b4fef1267550e2447953ce", # modelspec sai
- "1ab33e37fbb2566c55cd729e4ab79cc2f99cd9d0a578fabc7a2cf4ee47968be1", # diffusers
- "8cfa375669b1222d6fecf470f41b2abb370c76a90ab9568964c4bb15b34ec8a2", # diffusers fp16
- ],
- layer_b3=[
- "2f353c5e6ed0a2c05af00d014e18e65f69f1ce8c48f8eefbf8ad71b34f940fbf",
- "cc34bd3135d7cafc3cb6e3f6e7cb6896c98277bad52877a952ddbd2ffe222e01",
- "b90efdc848f5386d5250b6fb233ce380cf6cc299f497cfa1d2feaef22f87c9d1",
- ],
- layer_256=[
- "029b89ee311110c8f945dbdfc52c1d5daeb1e78c353c38aa3141ec68ce28e7cc",
- "5cdb948e5f3873300679073391d48fc648171f02093d7737d078557ff75762bb",
- "f73afbe43cc76571cb86ebcfced618668a2fb2252b0bc6ba88d6e942bae75741",
- ],
- )
- )
- repo = "segmind/SSD-1B"
-
- mir_db.add(
- mir_entry(
- domain="info",
- arch="unet",
- series=sdxl_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "7cb406ec0662e91570a79f3c4fb8f0ea5325bffe6af5d9382edae838698f72bd", # modelspec sai
- "1895a00bfc769a00b0c0c43a95e433e79e9db8a85402b45a33e8448785bde94d", # a1111 aio
- "0bf1ce6b065a6b969ab02dc8e8fa21eb20ee189b10935c49ce68c77a7e432c1c",
- "02ed8ebd0ed55aec686fcf20946d7a1659a31f9f8d9c3798cd254ba6b67434ca", # diffusers
- "40d8ea9159f3e875278dacc7879442d58c45850cf13c62f5e26681061c51829a", # diffusers fp16
- ],
- layer_b3=[
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45", # modelspec sai
- "1d6c0216da57fe98e7ad29e9653566725f5b2a87845fdbdcda257b3be817b5f4", # a1111 aio
- "c074dc38e8ec836816b91cbcc2ca17f80d6106de8d196d416ef9a27c8837ee45",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6",
- ],
- layer_256=[
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "b365a3631c6c74532f3a571c84c68e088be35496d35be1e932031713ddd2a2f4",
- "52267d5d327a2ba92c7a14261a9d081df621b8366819b1bb3a47d130523a813c",
- "89f86d9c846495870416b4945b6a46a517f28405e5bab666feb4057f012340be",
- "535b47e9b70da6494878ca6d45af3f2e201b7f17748432911c12232e586855e6",
- ],
- )
- )
- repo = "shuttleai/shuttle-3.1-aesthetic"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=schnell_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- 2: {
- "diffusers": "DiffusionPipeline",
- "generation": {"guidance_scale": 3.5, "num_inference_steps": 4},
- }
- },
- file_256=[
- "176871da1d5d2d511a52ae9b0dd70faa1f5d1b7734b7e33ed6b4bffa52050e0d",
- "4b80d37681eaed07b7f5b3825a392da929d1620933ede7c2749ef3613cc53f42",
- ],
- layer_b3=[
- "ff422d1734abf33366e87bbf44267dc6096c5d499e695287c35558174877412e",
- "5ad8034eac6b82d842311437101c52b5d35826ce34994940d9e667e702a0d45c",
- ],
- layer_256=[
- "e5d95de314cbfc49b79479118a1ac0b90fc95ccd6bb1a5c95803996d6cebf8fe",
- "d299e8ea4a605917ab98a4a7330d4d398b4ae295efbf458eeeceb5ff1bd7959a",
- ],
- )
- )
- repo = "shuttleai/shuttle-3-diffusion"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=schnell_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- 2: {
- "diffusers": "DiffusionPipeline",
- "generation": {"guidance_scale": 3.5, "num_inference_steps": 4},
- }
- },
- file_256=[
- "a5b04df4072698395387c21e8da0176d03f6557e0c38ff1dd3bf469ebab9d0fd", # fp8
- "a91b46de2055b3511ee87523b57862648856e8c00100161d5b520543a7302755", # norm
- "23a77c86189d5934da48bf44bb871cf80ba99177ffd3fd5272cdecb208c8b8be", # mlx q8
- "d3782d5a8f6e82c6676e8e26d54020934ada589d2aceb17fc5ca604b1bd55da8", # mlx q4
- ],
- layer_b3=[
- "4dd3174edf6b680ce9daf3de643e33ae2c4f09a4d5968da61ea48885f3a193c0",
- "9fdf191b2c58b2a6e190396e12314530593dca4f2a2bee389ec5175da5e52af8",
- "ad203ad6a00d8b1315337e34069e7c41016ea407469a536de8ad6807042017fd",
- ],
- layer_256=[
- "14d0e1b573023deb5a4feaddf85ebca10ab2abf3452c433e2e3ae93acb216443",
- "7ce8d449b32a9c959431ade729b513ee7a6457f11e1c13e3ef04dd8db3494621",
- "9c3395f67a3d844483b77f0ddd5e2ea64b61732fa9d9da19845bb8ae574c1f8c",
- ],
- )
- )
- repo = "enhanceaiteam/Mystic"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={0: {"generation": {"num_inference_steps": 16, "guidance_scale": 7.5, "width": 768, "height": 1024}}},
- file_256=[
- "179d4000e44295f6dfadc0e4ac210146454724d46371b82657200ff9fb5c68a9", # mlx 0
- "48ca85274e3b67f07f70dd84b67725e62395c2f7b188394342716f783ea4c6ac", # mlx q8
- ],
- layer_b3=[
- "91074aaebe1b5f3b2e7755d3c092af7eb240e92a192360690f1033949d3c8a68", # mlx 0
- ],
- layer_256=[
- "3942e6a52dbb0abaf63b031d9c4eda0df47576b51d4c81361978a3dc27b1309e", # mlx 0
- ],
- )
- )
- repo = "shuttleai/shuttle-jaguar"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=schnell_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- 2: {
- "diffusers": "DiffusionPipeline",
- "generation": {"guidance_scale": 3.5, "num_inference_steps": 4},
- }
- },
- file_256=[
- "dcbc4f2470b177eed12c7d7515c0e7342515a849ebd31a50c8d8d43913d7bd32",
- "26a7aa64c0798a3549e1d767932da0a7fb82b49f8edcbdcde804a20d9ed1478f", # mlx q8
- ],
- layer_b3=[
- "9906c29933d0c33a6ee8d9712f33fa8bd4b35b46a1c7b565ae48832b757dd980",
- "89c453c4bf99220405687eed984dace4492bdae1b6fb08f3d9629145b1a11672", # mlx q8
- ],
- sha_256=[
- "4eacf27e5659f5dc42f34c407cbe9e1e202290692df754eb68fe913f59fa2941",
- ],
- )
- )
- repo = "freepik/flux.1-lite-8b"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={0: {"generation": {"num_inference_steps": 28}}},
- file_256=[
- "09e970a7b8d1813ea7cacd48f9a944fd223882b137a8f4f3b61d864cdc20bbec", # mlx q8
- "de90e69945c2f4afcb9b6a057ce48190905c984370fce76b16ba3b97d46e2747", # mlx q4
- ],
- layer_b3=[
- "9276fa4805efeb45c08cca32c5b51d490e57a2ce5c15ef476a8e468a509c5cdf",
- ],
- layer_256=[
- "e1afe2f9b1ca55b3c659293cf3237f6b5571f5c4e826bad025ff0f7b54dc34ee",
- ],
- )
- )
- repo = "freepik/f-lite-7b"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
- )
- )
- repo = "freepik/f-lite-texture"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
- )
- )
- repo = "freepik/f-lite"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={0: {"f_lite": "FLitePipeline", "generation": {"num_inference_steps": 28}}},
- )
- )
- repo = "TencentARC/flux-mini"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=["4236455adeaeb4ed444d63b253ec99805022d17e962ed7261ada9c72ce11cfee"],
- layer_b3=["c1a6f83585398fe452d20596a79a522e2986f4c2c01a40e7bfd787af113735d3"],
- layer_256=["e4a0d8cf2034da094518ab058da1d4aea14e00d132c6152a266ec196ffef02d0"],
- ),
- )
- repo = "ostris/Flex.2-preview"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "0407108e446a4f57efffc5e7518bc374876af970d3c6068dc4074de0d221c615", # modelspec sai
- "df168ba94d5f96c478b24604a6beedff6189047152190509c73c162ea0d8ec02", # mlx
- ],
- layer_b3=[
- "7f85cdc186896da6965b57d5edb672f08663075d2b207f0e20e328c4034a8076", # mlx
- ],
- layer_256=[
- "5063de856be5365807d12b47ef6919b4ac611a72651739b2b4050e113bed7a83" # mlx,
- ],
- ),
- )
- repo = "ostris/Flex.1-alpha"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=dev_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "5d6dce30a266ccbf530c3a3bf253cd5486720a8fb71cdeed556c28304201dc2f", # modelspec sai
- "7acf8771b80a91eaa21566abe8c7d9d3ba33d8688e6e98446827749aee7ca1ee", # mlx
- ],
- layer_b3=[
- "cb3d3edafd81651eefd62894b3572deb02c5304f4b5d4f7ab8654f1fb922ecd6", # mlx
- ],
- layer_256=[
- "a6b9af6efc25fa77cd24046b81ee66fea09a9987d2a8e56ffca9b7a1c9c9c519" # mlx,
- ],
- ),
- )
- repo = "tensorart/stable-diffusion-3.5-medium-turbo"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=sd3_series,
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- pkg={
- 0: {
- "precision": "ops.precision.bfloat.B16",
- "generation": {"num_inference_steps": 8, "guidance_scale": 1.5, "height": 1024, "width": 768},
- }
- },
- file_256=[
- "5b0530e8d71b49fa1358f1208047cd789a40bae5b44406c9524b0f0d88f8b246", # diffusers
- "07119c77c3548a1d9eb30923df4dd55ec74914dc5ec81626804dcbe51ce17a5d", # sai
- "3c379381344d2a2b3ee3d7a1bc97f7d1e58fa95c6b5187fb48b3ce446f99f17b", # q4km gguf
- "6b3806cafdb4303ea2638e9e08eb186067b4a46a95ddf344ccdbe56537afaf6e", # q8km gguf
- ],
- layer_b3=[
- "873821614080a98e1ebfe56673bc96c2ac57379720d4ad2f97e4bca317571d48", # diffusers
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6",
- ],
- layer_256=[
- "3c324055a1ec6eb4ee0242e344bb2b6356afcbd2e215fdd9d160cda691a72fae",
- "7284d2027523482af9ef47405667ca891cc518bfb6ebf1f1d4666cb0accc8cd5",
- "d938ee5738c73f701760ed18acad274b074d2796123aee3f2eee1328b6c36ea4",
- "c4c40056c2a77959083b5a69a1a4b205caa463ccabde057352c5c4e38b2c67b6",
- ],
- ),
- )
- repo = "Wan-AI/Wan2.1-FLF2V-14B-720P-Diffusers"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=series,
- comp=comp,
- repo=repo,
- file_256=[
- "",
- "",
- ],
- layer_b3=[
- "",
- ],
- layer_256=[""],
- ),
- )
- repo = "OnomaAIResearch/Illustrious-Lumina-v0.03"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="dit",
- series=tag_model_from_repo("Alpha-VLLM/Lumina-Image-2.0")[0],
- comp=tag_model_from_repo(repo)[0],
- repo=repo,
- file_256=[
- "dc6cffcfb0ccfca6332ddb5d2fe25bcb5f496f44b481627f48c42626156fa6a8", # 2b 22100 ema unified fp32
- "2ac549741fa1c6de2d6cd8be06abcdce52d472eeae2439f948e285258b66a214", # 0.03 ema
- ],
- layer_b3=[
- "a97b4a63e1e7678e8e7154fae55252267bd1f0ba76b03dba622d801644e657ac",
- "aa6c1b2d1971cea3c4ed0963c8d68d4c50db683f8eab9f77f60ea2d04ed6ce5c",
- ],
- layer_256=[
- "39086c199b9ac296dcba53461ba1e113906d91fbc1b12556d92f5cc77ca11f9f",
- "e51ba2ded40f1af5ca6f78c46eed8305fbd87cd6401e9d439837e10d35cc5828",
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="patch",
- series="hidiffusion",
- comp=sdxl_series,
- pkg={
- 0: {
- "hidiffusion": {"apply_hidiffusion": {"timesteps": "StableDiffusionXLTimesteps"}},
- "generation": {"height": 2048, "width": 2048, "eta": 1.0, "guidance_scale": 7.5, "num_inference_steps": 10},
- },
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="scheduler",
- series="align-your-steps",
- comp=sdxl_series,
- pkg={
- 0: {
- "diffusers": "schedulers.scheduling_utils.AysSchedules",
- "generation": {"timesteps": "StableDiffusionXLTimesteps", "num_inference_steps": 10},
- }
- },
- )
- )
- # possible mixed-type architecture?
- # fusion / united / universal
-
-
-def add_mir_llm(mir_db: MIRDatabase):
- base_arch, base_series, base_comp = tag_base_model(repo_path="facebook/chameleon-7b", class_name="ChameleonModel")
- repo = "Alpha-VLLM/Lumina-mGPT-7B-1024"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=base_series,
- comp=series,
- repo=repo,
- pkg={
- 0: {
- "inference_solver": {"FlexARInferenceSolver": {"precision": "bf16", "target_size": 768}},
- "generation": {"images": [], "qas": [["q1", None]], "max_gen_len": 8192, "temperature": 1.0},
- },
- 1: {"inference_solver": "ChameleonXLLMXForConditionalGeneration"},
- },
- identifiers=["model.embed_tokens.weight"],
- file_256=[
- "6b71408a7c574d98f00114ab770ac6addc71471770456e482e7b5ec641c02345",
- "1d5d8d5532bae0f32ba35d10d411e506d61e4378dc9fc338f2b1e6af2aa322ec", # 768
- "a8fe636bbee30fef06dcd8e806ffc65b2aed0ad08a07fdc62f35717d0f851be5", # 512 multi
- "6420fa13483576d46263996627ba7add2237a01f46dedd3b7750112c0cc2d95b", # 512
- ],
- layer_b3=["6cd6b3caaea270feb5aff8e9fec205a27da4f48a1e740e63dc9a08f16e70a656"],
- layer_256=["eaa882db6a69cf8ed0104a15b2cdbbb570a23a06ab8c8f65f4c6c21719c6ba25"],
- ),
- )
- repo = "openai/clip-vit-large-patch14"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vit",
- series=series,
- comp=comp,
- repo=repo,
- pkg={0: {"transformers": "CLIPTextModel"}},
- identifiers=["text_model.encoder.layers.0.mlp.fc1.weight", "clip-l"],
- file_256=[
- "cb0cba1ead482a850532ebe5ff6b5c8d4456aee32a5228acf0a31e7d9472415e", # long vit best
- "39e79c916feca4ddf546d9fe923e664714b59ea61074f7228037d17c302f3d17", # vit l detail improved hit gmp
- "893d67a23f4693ed42cdab4cbad7fe3e727cf59609c40da28a46b5470f9ed082", # flux/shuttle 3 aes
- "778d02eb9e707c3fbaae0b67b79ea0d1399b52e624fb634f2f19375ae7c047c3", # playground 2.5
- "660c6f5b1abae9dc498ac2d21e1347d2abdb0cf6c0c0c8576cd796491d9a6cdd", # playground 2.5 fp16
- "71e183d11db0c6b6282a4d9e0abb74125edc8692393e89ed8ee5571005f35cb1", # sd3.5 fp16
- "5c3d6454dd2d23414b56aa1b5858a72487a656937847b6fea8d0606d7a42cdbc", # sdxl diffusers
- "87c1c0b0894c9e9e10b962e597e8d64dd3a3a2d372c389922b335a53c250b2ae", # L
- "bd289dd57fee86bc8816b55919a2b03f9c3c75af6025e21777325a6730872325", # jaguar mlx
- "8377b1ca9d88fe06ec483dd7b3cfc62e5e8dbf8ddd252f455e79d659fa0553c5", # ssd-1b
- "5487ea0eee9c9a9bff8abd097908d4deff3ae1fa87b3b67397f8b9538139d447", # ssd-1b fp16
- "92b998a9a64549bfa05c019bde114be6681549a0c79caee903fe30c9444d08b9", # vega
- "1e090d6a828fd92401be5f83e615fd7b4fb1f4a22e9af9040a38f602e839317c", # vega fp16
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9", # flux
- "d008943c017f0092921106440254dbbe00b6a285f7883ec8ba160c3faad88334", # sd1
- "77795e2023adcf39bc29a884661950380bd093cf0750a966d473d1718dc9ef4e", # sd1 fp16
- "b70c11ad5d7e9abf6109348908f599ea382f8019e1f36910bbc8ebecde936633", # hidream i1
- "fc42badf529dd83f2f7c3d20fe6bda1e22036162f37c4c668b9e130884e20561",
- "e27bafa0b3029ad637ef3ace24ce1efe85b8d0dbd22e03a2e70bda6fc88963a1", # onnx
- ],
- layer_b3=[
- "f58a22a381f79985b6d38782f6110a52c2f319b40fdedd3b88b24945dfcbdf64",
- "8faa00b8fd1dbd9286a7237df18caeb8c91af100a6813849b6bae272a01dd7b7",
- "ab5bebc98299c155251a06deccde599ba0128038ee3ce021e8c59a45f58f72c0",
- "c70e9d86a9dcbbbe7c269ef9dfac96ce9c96c46922577338cc1902e5fe936315",
- "f285e9b7b70745df81adc8b558ec74b536b79b6fc02a453ecc61ea9d13f25f1a",
- "7ab17bfa06ab8d65840997ef641f3f593d096860e20141f1eeb0169d131c1c23",
- "2737d3f327e8176dbb549b9c5c4994821430a6c3b07e3bbc925d97511c802636", # jaguar mlx q8
- "58a826a4a5fe555b4df188a1ebc0d8d9c96cedae3a26ce84c247861dbb93388f", # sd1
- "1540fd8844898960e18ce8fd153e5f21a8c446bd8c4d6f536a7cf11418f02bf3", # sd1
- "c4c9caccdbec12b965d93688c521893f75e0bf9a5e0aad70a6a962b669e7b9d5", # vega
- "e43fae8d5fd1e562607da172369cc0c5ec99b834e42502e682287ff7d12baacc", # vega fp16
- "c6f79f7416a882891957b815fbdfd6edfaa253c43970b1a25ef14e217599c7bc", # flux
- "daf5e09f67ad09a909f58a01298fec0132324634cb8fca2a604c3a240c2c453f", # jaguar mlx q8
- "3f62bfb6bbde05f01435129326166c44aeb113ac0d9f735f31ed3f7dd04f6980", # hidream i1
- "22f866f3c96a92bc61e9965cf366d706db942ad047ba8cb82109edcd4e68fa40", # sd3 turbo
- "f3fa9d7a8f15741621c1fe82f8a1bcc5c601c900d947ac09fba7016615a252a5", # shap-e
- ],
- layer_256=[
- "48daa3d8f939972e69f044533a4312a941971c18c78255f5e555fa26faf664c1",
- "60f5734a74c342be8b0011fc704e718431839790bcfdc7d7004fc39d70f7fec6",
- "6e76e25b4a55dddfa2eecf4b7ab189a8148658a9f6df165c00170f6ce661033c",
- "2d5249df489fec9137cc3a5e9bda499dd9b72a957ddd8e7ad4e99ff3684bad99",
- "3bf085e701713ed3e79775dafea375c3e2a43659ad1ee788b1b393c0aeff9f0e",
- "efb7976800692772e449c81a739339f59394886590ff3f768b0f9ddd87d2a94c",
- "9b0ac8d127c6c457b2eb8c7236f18c4e4ba9e8bbf27130aa8fe854d7c3f7b1e0",
- "24a9ee3d60cdde6c967f08e4b2ec7088fe1bfe308c6896e73caa874860570a5c",
- "5d6d9d0cc7943eb1b8c16862bfd5bee5c3766d0df027ec837e90fac715ac2bd3",
- "68fb122f7d6c3cfbef320341b2af8f5916678e36a69ed36fa8cfcb19e7d5c43d",
- "11807cb2522cfe99240e5ee2bbeb1ccb42cecca2215102ee872567c7773b28b9",
- "50c46cdddbe9f0162278c69b9a1f818519330e3a91b994272e19b5c789670471", # jaguar mlx q8
- "ffe1c4f55e07c2010ace7b9cf35798bb9f431bc954a32784e5acbdc16acc0364", # hidream i1
- "146ea48d234e05a934db9d8988e9a9dd86b2ac70f535eaa550ecb0ee23ec135e", # sd3 turbo
- "d97560cf9704cf71711f6121df2bf55e55a1eda4b574a6ddba074767420bc8c3",
- ],
- )
- )
- repo = "laion/CLIP-ViT-g-14-laion2B-s12B-b42K"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vit",
- series=series,
- comp=comp,
- repo=repo,
- pkg={0: {"transformers": "CLIPTextModelWithProjection"}},
- identifiers=["31.self_attn.k_proj.weight", "text_model.encoder.layers.22.mlp.fc1.weight", "clip-g"],
- file_256=[
- "ca18e0c67c1ef1e64cac22926266765b60688f692307ecc06283d987c5768134", # seaart furry g
- "ec310df2af79c318e24d20511b601a591ca8cd4f1fce1d8dff822a356bcdb1f4", # modelspec sai
- "fa5b2e6f4c2efc2d82e4b8312faec1a5540eabfc6415126c9a05c8436a530ef4", # playground 2.5
- "b84f413eebecbd049b72874c1df533a516510cb5a2489ae58c7e320209cf0ebe", # ssd1b
- "d3df577f6e3799c8e1bd9b40e30133710e02e8e25d0ce48cdcc790e7dfe12d6d", # ssd1b fp16
- "943a2924ee888295a156dd47089d67181d633b782337890af11ef4b15af17ec5", # vega
- "5b98e4a57a9292eeb819d67e2d2100f66f17db723cde4ecea27a7c3741160d0c", # vega fp16
- "4d6effa7a5e600cabf7528ed7234146a13ead1b2c151211d706b293a060b112a", # hidream i1
- "3a6032f63d37ae02bbc74ccd6a27440578cd71701f96532229d0154f55a8d3ff", # modelspec sai
- "162042ac6556e73f93d4172d4c67532c1cbe4dc7a6a8fa7e44dd2e3d7cbb772b", # onnx
- ],
- layer_b3=[
- "d754db276f2d89d2808abb7086b3b8eccee43ac521c128d21a071f3a631474a8",
- "2eb93685b34719e1d1e0541d8902b0a592d95848f80657e32816cf3b152a0f31",
- "e253a5cf3a6242c58037abd6b378bf0281f278e441f28dff7ca1bcfcd3cd6bd8", # ssd1b
- "16d0eec4e55b0aa63cdca4e4d36f78f66a4b1b9605ce3b1089305026f853c3d2", # ssd1b fp16
- "f606463295ecf3bae8920d3d45bb9d180793418b3d08c3e84d4c4135c7dc2aa5", # vega
- "7060993a5eb32d94d1ea8aef7a7301e7be73b199c639c63f8f7cfbfcd2abf10e", # vega fp16
- "b92af95334c657371af6051a91374a41b5455907fa6622bb66a8c112dc511600", # hidream i1
- ],
- layer_256=[
- "270e998633eb22145100a3889a62ca270d5080654735e5ff8dda09a7c233af8d",
- "df18800c2a9d9318c4323d991a0fb24a6a9afceb41bea203812f60517c301536",
- "4c228b104f6b9b383e0808c9baa1998957f5125d8f90a4d98c1a86e71edd72dc", # ssd1b
- "f7fc81d8b5ae91ec28a5106ecc0d067be9a94fd3f394c4aa4686ed131ce5a5b3", # ssd1b fp16
- "61ab42bd5c0fcb9fd3db1d4014cb844ccae8dc17fd69a108cf077a573d092946", # vega
- "6c64e36cdda3bec7067e94b05619f882f5d31070792acaadac60ddbef580453a", # vega fp16
- "43c9e64995b485a7f128771c48defce128640df28e65c7f79537d472f43ebe46", # hidream i1
- ],
- )
- )
- repo = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vit",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {"transformers": "CLIPModel"},
- },
- file_256=[
- "036e6e2bd49697511f4f8b8cb5ee465f93025f7a69a145eadeb9a881ace9b18d",
- "0084e75319a50ad85ef45377bad5bc38f2f58824459eb690048d51c9f8863be5", # open clip
- "64a7ef761bfccbadbaa3da77366aac4185a6c58fa5de5f589b42a65bcc21f161", # wan sai
- ],
- layer_b3=[
- "227f26ed63120b9034f4a0c90b6b37eede721a8260f2c1e8f7ea3ccc0d109e7e",
- "3a38ffd1b60499cf2f451f3065079ff26efb9190a86f23ad1c8d993bbeb9af05", # open clip
- "ce06cf1fd684269ee96631b2bf9334c6ecde6a84a55760dfa0d9d2a6411f28e4", # wan sai
- ],
- layer_256=[
- "130a94ed12569e099196a6ca27388181922e20148dee5bcb58c5e309acfc2352",
- "cfdbd3fd2b90b64ba12d395a62dd7c3c3ea3e811f0a54593e91bae6516ca5061", # open clip
- "9125ce5970c649d6f9368c25493d3aaa6b41e224d4cc427e955115f7b7e53d1c", # wan sai
- ],
- )
- )
- repo = "zai-org/chatglm3-6b" # formerly THUDM
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="aet",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {"transformers": "AutoModel"},
- },
- file_256=[
- "0054d03310248928fdabdeef3fdc753170218dc49a1e9eb5f98323e27683f654", # kolors
- "b1052386eac358a18add3d0f92521c85ab338979da8eeb08a6499555b857f80d",
- ],
- layer_b3=[
- "a45dfba6a9fa8739777c76deb845fc9589b40f88670d3ce4661646a7b7b1d481", # kolors
- ],
- layer_256=[
- "174924fd7a07f370bb6fcd1ad07a73eecb7de901f15eefb80f420c1042c47d44", # kolors
- ],
- )
- )
- base_arch, base_series, base_comp = tag_base_model(repo_path="Qwen/Qwen2-7B-beta", class_name="Qwen2Model")
- repo = "ByteDance-Seed/BAGEL-7B-MoT"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=base_series,
- comp=series,
- repo=repo,
- pkg={0: {"Bagel": "app"}},
- )
- )
-
-
-def add_mir_audio(mir_db: MIRDatabase):
- """Create MIR audio modality entries"""
- repo = "facebook/audiogen-medium"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "audiocraft": "models.AudioGen",
- "generation": {"duration": 5},
- "stage_2": {
- "audiocraft": ".data.audioaudio_write",
- "generation": {"strategy": "loudness", "loudness_compressor": True},
- },
- }
- },
- )
- )
- repo = "parler-tts/parler-tts-tiny-v1"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "parler_tts": "ParlerTTSForConditionalGeneration",
- "generation": {"return_tensors": "pt"},
- },
- },
- )
- )
- repo = "Zuellni/snac-24khz-ST"
- series, comp = tag_model_from_repo(repo)
- (
- mir_db.add(
- mir_entry(
- domain="info",
- arch="gan",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "snac": "SNAC",
- },
- "1": {
- "mlx_audio": "tts.generate.generate_audio",
- },
- },
- file_256=["e61ae2f638f56ee07a37592cd5a6a9e7d642560ddc78a76ee4a7f96d6922f1be", "973ee1be4032319fd9685ec54eee1b93e79c7bc98c786e67f17c04669714f11d"],
- layer_b3=["18307b00460a64cc4893f9061592ce8d7e15b70fc54065cc8ae0f0155381ec46", "d599b1bb36dee3cee4674b7922fcd69e5ec05b74413f611d21cfdfdf8f9b6119"],
- layer_256=["35ba9aa1feb931010559a178fcac243673d2efdd1396a4b69d406c9853a88300", "5a22c4707ed6c928043f23b59f2d102a579db3a9af41cf6e60d7c3958f182841"],
- )
- ),
- )
- repo = "parler-tts/parler-tts-large-v1"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "parler_tts": "ParlerTTSForConditionalGeneration",
- "generation": {"return_tensors": "pt"},
- },
- },
- )
- )
- repo = "hexgrad/Kokoro-82M"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="gan",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {"kokoro": "KPipeline"},
- 1: {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {"audio_format": "wav", "join_audio": True, "verbose": False},
- },
- },
- file_256=[
- "5a5cb3d87478f2e74dfca208ee52209ccfce024095e137097fd276026506e45f",
- "496dba118d1a58f5f3db2efc88dbdc216e0483fc89fe6e47ee1f2c53f18ad1e4",
- ],
- layer_b3=[
- "3e9b5017cfe67a7804ac717b18b6add42ffc0bd3353490df2bcc520eaaef79b6",
- "379660a87a64524bab69a267e3d9580f04b5eec4f7e3fbd48c6597d164d9b17d", # safetensors
- "997f154f5a78879ef3ba1a1556977c40b28b9c21076b8f583f752c57ecc36e93" # pytorch
- "2dc3dba29452b85ea85266084a6248f9e0efe642d5f75b43e64f25b9f2837f92",
- ],
- layer_256=[
- "dbedf0e2115aa309b92689f86534be4a77b91d7900365e1717879fbb19b849f6",
- "2c68574571b3f9229e015a909788116ea2251142e29c1bd5c687863192124e8b",
- ],
- )
- )
- repo = "freddyaboulton/silero-vad"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="stst",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "onnx": "onnx",
- },
- 1: {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {"audio_format": "wav", "join_audio": True, "verbose": False},
- },
- },
- file_256=["591f853590d11ddde2f2a54f9e7ccecb2533a8af7716330e8adfa6f3849787a9"],
- layer_b3=[
- "41ca5931452b3ffee588c6c7e5bd327c4e914141604eaf3fd05f4a790ac83bb2",
- "7dc736cd5d840182792bde4edfbf5ddc5aeaf16826a9c72d1ba8166c1e3fab9b",
- "6e2c1bdbad74f56663ffb5710c7cb849a2b91ba331d81acdba47a21f69107434", # onnx
- "ab5ff443aece9171af5e7603d0b4309d3ecc934e3940ccedefff10f0b54b931e", # onnx vad
- # "7939427700c3b4d91428a490bde1a6d893f63ee5d79b86f68de9e89c7094d3e7" # onnx # <- clip-g ?? unet? inaccurate test at layer level
- ],
- layer_256=[
- "2ffef1834d5fe14ad8db58fc78d769d5dc38dda5eddbfc396786f74b326215fd",
- # "94ea015f5f7f65b1d8e80f7d52859535e7761d7ed2752e24d57a8d9d9da96672", # onnx lose reliability with layer search apparently
- ],
- ),
- )
- repo = "facebook/wav2vec2-conformer-rope-large-960h-ft"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="stst",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "transformers": "Wav2Vec2ConformerForCTC",
- },
- },
- file_256=["97bb9761fb71ec1225100bc81ccf7d002e0d0ba3d0604c1fd2dbda7d7d491f1d"],
- layer_b3=["6c9c5642aa8dce62bcb3eb577bc519619a2d868005c767c5e65371c583a8a8eb"],
- layer_256=["1afcfda68307a75caa1a1c4456cf97e20c7914e8aba828006e9fe17e8675a79d"],
- ),
- )
- repo = "canopylabs/orpheus-3b-0.1-ft"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {
- "orpheus_tts": "OrpheusModel",
- "generation": {"max_model_len": 2048},
- },
- 1: {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {"audio_format": "wav", "join_audio": True, "verbose": False},
- },
- },
- )
- )
- repo = "OuteAI/OuteTTS-0.3-1B"
- series, comp = tag_model_from_repo(repo)
- mir_db.add(
- mir_entry(
- domain="info",
- arch="art",
- series=series,
- comp=comp,
- repo=repo,
- pkg={
- 0: {"outetts": "InterfaceHF"},
- 1: {
- "mlx_audio": "tts.generate.generate_audio",
- "generation": {"audio_format": "wav", "join_audio": True, "verbose": False},
- },
- },
- )
- )
-
-
-def add_mir_lora(mir_db: MIRDatabase):
- """Create MIR lora entries"""
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="dmd",
- comp=sdxl_series,
- repo="tianweiy/DMD2",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {}},
- "generation": {"num_inference_steps": 4, "guidance_scale": 0, "timesteps": [999, 749, 499, 249]},
- "scheduler": {"ops.scheduler.lcm": ""},
- }
- },
- file_256=[
- "b3d9173815a4b595991c3a7a0e0e63ad821080f314a0b2a3cc31ecd7fcf2cbb8",
- "a374289e9446d7f14d2037c4b3770756b7b52c292142a691377c3c755010a1bb",
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="dpo",
- comp=sdxl_series,
- repo="radames/sdxl-DPO-LoRA",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {}},
- "generation": {"guidance_scale": 7.5, "num_inference_steps": 4},
- "scheduler": {"ops.scheduler.dpm": {"algorithm_type": "sde-dpmsolver++", "use_karras_sigmas": True, "order": 2}},
- },
- },
- file_256=[
- "666f71a833fc41229ec7e8a264fb7b0fcb8bf47a80e366ae7486c18f38ec9fc0",
- "6b1dcbfb234d7b6000948b5b95ccebc8f903450ce2ba1b50bc3456987c9087ad",
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="flash",
- comp=sdxl_series,
- repo="jasperai/flash-sdxl",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {}},
- "scheduler": "ops.scheduler.lcm",
- }
- },
- file_256=["afe2ca6e27c4c6087f50ef42772c45d7b0efbc471b76e422492403f9cae724d7"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="flash",
- comp="pixart-alpha",
- repo="jasperai/flash-pixart",
- pkg={
- 0: {"diffusers": {"load_lora_weights": {}}},
- },
- file_256=["99ef037fe3c1fb6d6bbefdbb85ad60df434fcc0577d34c768d752d60cf69681b"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="flash",
- comp=sd3_series,
- repo="jasperai/flash-sd3",
- pkg={
- 0: {"diffusers": {"load_lora_weights": {}}},
- },
- file_256=["85fce13c36e3739aa42930f745eb9fceb6c53d53fb17e2a687e3234c1a58ee15"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="flash",
- comp=sd1_series,
- repo="jasperai/flash-sd",
- pkg={
- 0: {"diffusers": {"load_lora_weights": {}}, "generation": {"num_inference_steps": 4, "guidance_scale": 0}},
- },
- file_256=["99353444c1a0f40719a1b3037049dbd24800317979a73c312025c05af3574a5f"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="hyper",
- comp=sdxl_series,
- repo="ByteDance/Hyper-SD",
- pkg={0: {"diffusers": {"load_lora_weights": {"fuse": 1.0}}}},
- file_256={
- "0b97f447b5878323a28fbe7c51ba7acebd21f4d77552ba77b04b11c8911825b6": {"num_inference_steps": 12},
- "55b51334c85061afff5eff7c550b61963c8b8607a5868bbe4f26db49374719b1": {"num_inference_steps": 8},
- "c912df184c5116792d2c604d26c6bc2aa916685f4a793755255cda1c43a3c78a": {"num_inference_steps": 1, "guidance_scale": 0.0},
- "69b25c0187ced301c3603c599c0bc509ac99b8ac34db89a2aecc3d5f77a35187": {"num_inference_steps": 2, "guidance_scale": 0.0},
- "12f81a27d00a751a40d68fd15597091896c5a90f3bd632fb6c475607cbdad76e": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "ca689190e8c46038550384b5675488526cfe5a40d35f82b27acb75c100f417c1": {"num_inference_steps": 8, "guidance_scale": 0.0},
- },
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="hyper",
- comp=dev_series,
- repo="ByteDance/Hyper-SD",
- pkg={0: {"diffusers": {"load_lora_weights": {"fuse": 0.125}}}},
- file_256={
- "6461f67dfc1a967ae60344c3b3f350877149ccab758c273cc37f5e8a87b5842e": {"num_inference_steps": 16, "guidance_scale": 0.0},
- "e0ab0fdf569cd01a382f19bd87681f628879dea7ad51fe5a3799b6c18c7b2d03": {"num_inference_steps": 8, "guidance_scale": 0.0},
- },
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="hyper",
- comp=sd3_series,
- repo="ByteDance/Hyper-SD",
- pkg={0: {"diffusers": {"load_lora_weights": {"fuse": 0.125}}}},
- file_256={
- "5b4d0b99d58deb811bdbbe521a06f4dbf56a2e9148ff3211c594e0502b656bc9": {"num_inference_steps": 16},
- "0ee4e529abd17b06d4295e3bb91c0d4ddae393afad86b2b43c4f5eeb9e401602": {"num_inference_steps": 4},
- "fc6a3e73e14ed11e21e4820e960d7befcffe7e333850ada9545f239e9aa6027e": {"num_inference_steps": 8},
- },
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="hyper",
- comp=sd1_series,
- repo="ByteDance/Hyper-SD",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- file_256={
- "64b98437383537cd968fda6f87a05c33160ece9c79ff4757949a1e212ff78361": {"num_inference_steps": 12},
- "f6123d5b950d5250ab6c33600e27f4dcf71b3099ebf888685e01e9e8117ce482": {"num_inference_steps": 8},
- "a04fd9a535c1e56d38f7590ee72a13fd5ca0409853b4fff021e5a9482cf1ca3b": {"num_inference_steps": 1, "guidance_scale": 0.0},
- "2f26dcc1d883feb07557a552315baae2ca2a04ac08556b08a355a244547e8c3a": {"num_inference_steps": 2, "guidance_scale": 0.0},
- "c5dd058616461ed5053e2b14eec4dbe3fa0eea3b13688642f6d6c80ea2ba5958": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "91fc3186236e956d64dbb4357f2e120c69b968b78af7d2db9884a5ca74d3cd13": {"num_inference_steps": 8, "guidance_scale": 0.0},
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="lcm",
- comp=sdxl_series,
- repo="latent-consistency/lcm-lora-sdxl",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {"fuse": 1.0}},
- "scheduler": {"ops.scheduler.lcm": {"timestep_spacing": "trailing"}},
- "generation": {"num_inference_steps": 8},
- },
- },
- file_256=["a764e6859b6e04047cd761c08ff0cee96413a8e004c9f07707530cd776b19141"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="lcm",
- comp=ssd_series,
- repo="latent-consistency/lcm-lora-ssd-1b",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "generation": {"num_inference_steps": 8}}},
- file_256=["7adaaa69db6f011058a19fd1d5315fdf19ef79fcd513cdab30e173833fd5c59b"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="lcm",
- comp=vega_series,
- repo="segmind/Segmind-VegaRT",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "gen_kwargs": {"num_inference_steps": 8}}},
- file_256=["9b6e8cd833fa205eaeeed391ca623a6f2546e447470bd1c5dcce3fa8d2f26afb"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="lcm",
- comp=sd1_series,
- repo="latent-consistency/lcm-lora-sdv1-5",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "generation": {"num_inference_steps": 8}}},
- file_256=["8f90d840e075ff588a58e22c6586e2ae9a6f7922996ee6649a7f01072333afe4"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="lightning",
- comp=sdxl_series,
- repo="ByteDance/SDXL-Lightning",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "generation": {"num_inference_steps": 4, "guidance_scale": 0}}},
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="pcm",
- comp=sdxl_series,
- repo="wangfuyun/PCM_Weights",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- file_256={
- "0365f6107250a4fed1b83e8ae6a070065e026a2ba54bff65f55a50284232bbe6": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "04ea827435d5750e63d113dc509174b4f6e8a069ff8f91970c3d25299c10b1f8": {"num_inference_steps": 16},
- "7eb353b2abcaabab6251ba4e17d6cbe2e763feb0674b0f950555552212b44621": {"num_inference_steps": 16},
- "a85cf70ac16ed42011630a5cd6b5927722cb7c40a2107eff85e2670f9a38c893": {"num_inference_steps": 4}, # float16
- "9f7f13bb019925eacd89aeff678e4fd831f7b60245b986855dff6634aee4eba9": {"num_inference_steps": 4},
- "3b9c970a3e4c0e182931e71b3f769c1956f16c6b06db98b4d67236790d4d0b1d": {"num_inference_steps": 8},
- "7f04ba8911b4c25ef2c7cbf74abcb6daa3b4f0e4bc6a03896bdae7601f2f180b": {"num_inference_steps": 8},
- "13fb038025ce9dad93b8ee1b67fc81bac8affb59a77b67d408d286e0b0365a1d": {"num_inference_steps": 16, "guidance_scale": 0.0},
- "3442eff271aa3b60a094fd6f9169d03e49e4051044a974f6fcf690507959191f": {"num_inference_steps": 16, "guidance_scale": 0.0},
- "242cbe4695fe3f2e248faa71cf53f2ccbf248a316973e4b2f38ab9e34f35a5ab": {"num_inference_steps": 2, "guidance_scale": 0.0},
- "e1f600491bb8e0cd94f41144321e44fdb2cb346447f31e71f6e53f1c24cccfbf": {"num_inference_steps": 2, "guidance_scale": 0.0},
- "d0bf40a7f280829195563486bec7253f043a06b1f218602b20901c367641023e": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "212150d7953627fb89df99aad579d6763645a1cb2ef26b19fee8b398d5e5ff4d": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "e80fcf46d15f4d3821d3d9611bdb3022a4a8b647b2536833b168d317a91e4f74": {"num_inference_steps": 8, "guidance_scale": 0.0},
- "56ed9dc9f51f4bb0d6172e13b7947f215c347fc0da341c8951b2c12b9507d09e": {"num_inference_steps": 8, "guidance_scale": 0.0},
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="pcm",
- comp=sd1_series,
- repo="wangfuyun/PCM_Weights",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- file_256={
- "b80b27dd6504f1c3a7637237dda86bc7e26fa5766da30c4fc853c0a1d46bad31": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "8f605ffde3616592deb37ed8c6bacb83fe98963c1fd0883c2a4f93787098aa45": {"num_inference_steps": 16},
- "fa6acb94f11dba3bf4120af5a12e3c88cd2b9572d43ec1a6fb04eede9f32829e": {"num_inference_steps": 4},
- "bff3d4499718b61455b0757b5f8d98fe23e73a768b538c82ecf91c693b69dbcd": {"num_inference_steps": 8},
- "c7ac2fa3df3a5b7080ebe63f259ab13630014f104c93c3c706d77b05cc48506b": {"num_inference_steps": 16, "guidance_scale": 0.0},
- "4c5f27a727d12146de4b1d987cee3343bca89b085d12b03c45297af05ce88ef4": {"num_inference_steps": 2, "guidance_scale": 0.0},
- "29278bc86274fdfc840961e3c250758ff5e2dc4666d940f103e78630d5b879d3": {"num_inference_steps": 4, "guidance_scale": 0.0},
- "41a7f0b966d18f643d16c4401f0b5ef6b9ef7362c20e17128322f17874709107": {"num_inference_steps": 8, "guidance_scale": 0.0},
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="pcm",
- comp=sd3_series,
- repo="wangfuyun/PCM_Weights",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- file_256={
- "8a45878ecc34e53855fe21146cb6ef32682053b7c4eacc013be89fb08c4c19d8": {"num_inference_steps": 2, "guidance_scale": 1.2},
- "9444a5cead551c56c4d1c455ce829ba9f96f01fbcca31294277e0862a6a15b76": {"num_inference_steps": 4, "guidance_scale": 1.2},
- "e365902c208cbc0456ca5e7c41a490f637c15f3f7b98691cbba21f96a8c960b4": {"num_inference_steps": 4, "guidance_scale": 1.2},
- "3550fa018cd0b60d9e36ac94c31b30f27e402d3855ed63e47668bb181b35a0ad": {"num_inference_steps": 4, "guidance_scale": 1.2},
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="slam",
- comp=sdxl_series,
- repo="alimama-creative/slam-lora-sdxl",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {}},
- "scheduler": {"ops.scheduler.lcm": {"timestep_spacing": "trailing"}},
- "generation": {"num_inference_steps": 4, "guidance_scale": 1},
- }
- },
- file_256=["22569a946b0db645aa3b8eb782c674c8e726a7cc0d655887c21fecf6dfe6ad91"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="slam",
- comp=sd1_series,
- repo="alimama-creative/slam-sd1.5",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="spo",
- comp=sdxl_series,
- repo="SPO-Diffusion-Models/SPO-SDXL_4k-p_10ep_LoRA",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "generation": {"guidance_scale": 5.0}}},
- file_256=["0b9896f30d29daa5eedcfc9e7ad03304df6efc5114508f6ca9c328c0b4f057df"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="spo",
- comp=sd1_series,
- repo="SPO-Diffusion-Models/SPO-SD-v1-5_4k-p_10ep_LoRA",
- pkg={0: {"diffusers": {"load_lora_weights": {}}, "generation": {"guidance_scale": 7.5}}},
- file_256=["1be130c5be2de0beacadd3bf0bafe3bedd7e7a380729932a1e369fb29efa86f4"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="tcd",
- comp=sdxl_series,
- repo="h1t/TCD-SDXL-LoRA",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {}},
- "generation": {"num_inference_steps": 4, "guidance_scale": 0, "eta": 0.3},
- "scheduler": {"ops.scheduler.tcd": {}},
- }
- },
- file_256=["2c777bc60abf41d3eb0fe405d23d73c280a020eea5adf97a82a141592c33feba"],
- ),
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="tcd",
- comp=sd1_series,
- repo="h1t/TCD-SD15-LoRA",
- pkg={0: {"diffusers": {"load_lora_weights": {}}}},
- file_256=["eaecb24a1cda4411eab67275b1d991071216ac93693e8fa0c9226c9df0386232"],
- layer_b3=["90158259812a89beb8874216009c799f420334aac49bbf4fa1bf0ebf4bbd256b"],
- layer_256=["e9825b81bca684126ac3cc8867d2ebc655f74268bc26bea4e4b7e58a52ad6c75"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="turbo",
- comp=sdxl_series,
- file_256=["a599c42a9f4f7494c7f410dbc0fd432cf0242720509e9d52fa41aac7a88d1b69"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="turbo",
- comp=dev_series,
- repo="alimama-creative/FLUX.1-Turbo-Alpha",
- pkg={
- 0: {
- "diffusers": {"load_lora_weights": {"fuse": 0.125}},
- "generation": {"guidance_scale": 3.5, "num_inference_steps": 8, "max_sequence_length": 512},
- }
- },
- file_256=["77f7523a5e9c3da6cfc730c6b07461129fa52997ea06168e9ed5312228aa0bff"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="turbo",
- comp=sd3_series,
- repo="tensorart/stable-diffusion-3.5-medium-turbo",
- pkg={0: {"diffusers": {"load_lora_weights": {"fuse": 1.0}}, "scheduler": {"ops.scheduler.flow-match": {"shift": 5}}}},
- file_256={"bdcbdfa3ec8ed838b77b1020eea3bc7917a2d42573688a034feb921fde8b1858": {"num_inference_steps": "4"}},
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="lora",
- series="turbo",
- comp=sd3_series,
- repo="tensorart/stable-diffusion-3.5-large-TurboX",
- pkg={0: {"diffusers": {"load_lora_weights": {"fuse": 1.0}}, "scheduler": {"ops.scheduler.flow-match": {"shift": 5}}}},
- file_256={"fae59d1b749c0d14a8fd4c68cc94eaac92876cee7b91fa75cf8fde3160e09548": {"num_inference_steps": "8"}},
- )
- )
-
-
-def add_mir_vae(mir_db: MIRDatabase):
- """Create MIR VAE missing from the database"""
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="tae",
- comp=sd3_series,
- repo="madebyollin/taesd3",
- pkg={0: {"diffusers": "AutoencoderTiny"}},
- file_256=["6f79c1397cb9ce1dac363722dbe70147aee0ccca75e28338f8482fe515891399"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="tae",
- comp=sdxl_series,
- repo="madebyollin/taesdxl",
- pkg={0: {"diffusers": "AutoencoderTiny"}},
- file_256=["ff4824aca94dd6111e0340fa749347fb74101060d9712cb5ef1ca8f1cf17502f"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="tae",
- comp=sd1_series,
- repo="madebyollin/taesd",
- pkg={0: {"diffusers": "AutoencoderTiny"}},
- file_256=["db169d69145ec4ff064e49d99c95fa05d3eb04ee453de35824a6d0f325513549"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="tae",
- comp=dev_series,
- repo="madebyollin/taef1",
- pkg={0: {"diffusers": "AutoencoderTiny"}},
- file_256=["927f7de7f11bbd3b2d5ce402e608d97a7649e0921a9601995b044e8efc81e449"],
- )
- )
- series, comp = tag_model_from_repo("Qwen/Qwen-Image")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLQwenImage"},
- },
- file_256=[
- "0c8bc8b758c649abef9ea407b95408389a3b2f610d0d10fcb054fe171d0a8344", # diffusers
- ],
- layer_b3=[
- "64af8fb08d2054c81ad2aef94965be8fb1366fcc6136cb9222ae046550af014b", # diffusers
- ],
- layer_256=[
- "42f255440ef1d379a8a731456bc44312a73a8568716caa6100803990cd5ea7dc", # diffusers
- ],
- )
- )
- series, comp = tag_model_from_repo("Wan-AI/Wan2.1-I2V-14B-480P-Diffusers")
- sr_series_text2v, _ = tag_model_from_repo("Skywork/SkyReels-V2-T2V-14B-720P-Diffusers")
- sr_series_image2v, _ = tag_model_from_repo("Skywork/SkyReels-V2-I2V-14B-720P-Diffusers")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="wan",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {
- "diffusers": "AutoencoderKLWan",
- "precision": "ops.precision.float.F32",
- }
- },
- file_256=[
- "d6e524b3fffede1787a74e81b30976dce5400c4439ba64222168e607ed19e793", # diffusers
- "2fc39d31359a4b0a64f55876d8ff7fa8d780956ae2cb13463b0223e15148976b", # sai
- ],
- layer_b3=[
- "f867543d636029ebfc05b8075e572be0b313a83b0470e56bcf4bbad07a6db010", # diffusers
- "6b5b229727a2d4e37993687c62c94ff8519a371ab4103c699ff1f5969ca0b433", # sai
- ],
- layer_256=[
- "121b3974b39263dcca9d644d1b5c9b9251a911b6a8a8e307fcb21ca778e78ed2",
- "364be43a8959012d798d3f98e17d8b5c4b99ba1e70077008dd19acca3ced395e",
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="wan",
- comp=sr_series_text2v,
- # no repo here, may conflict
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="wan",
- comp=sr_series_image2v,
- # no repo here, may conflict
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("Lightricks/LTX-Video")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLLTXVideo"},
- },
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("rhymes-ai/Allegro")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLAllegro"},
- },
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("zai-org/CogVideoX-5b-I2V")
- series_fun, _ = tag_model_from_repo("alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose")
- series_wish, _ = tag_model_from_repo("BestWishYsh/ConsisID-preview")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="cogvideox",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLCogVideoX"},
- },
- file_256=["a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e"],
- layer_b3=["246addb8dc798240638bffee4546a3c5c83572139b4a2a602d68b4c4146226eb"],
- layer_256=["43c7e9cb4364e55fd563817f01484ede8a09ff19a8e69eb61a32a12f93d6f66e"],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="cogvideox",
- comp=series_fun,
- # no repo here, may conflict
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="cogvideox",
- comp=series_wish,
- # no repo here, may conflict
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("nvidia/Cosmos-1.0-Diffusion-7B-Video2World")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLCosmos"},
- },
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("alibaba-pai/EasyAnimateV5.1-7b-zh-diffusers")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLMagvit"},
- },
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("hunyuanvideo-community/HunyuanVideo-I2V")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLHunyuanVideo"},
- },
- file_256=[
- "95d1fc707c1421ccd88ea542838ab4c5d45a5babb48205bac9ce0985525f9818", # pt,
- "7c68a6295f9034a88225fbafb1f3258291a08d57a1fdb938233fa57b1b8f4883",
- "fbe5ea338431bc8ba20f7019b474e83379fe5763abfd562adcc04b1c0d35c728",
- "019973c147e0c3462629d8d06bdbdbb83408f3ebd4ea4b4ae21a99c3cdcb54c0",
- ],
- # layer_b3=[],
- # layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("genmo/mochi-1-preview")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLMochi"},
- },
- file_256=[],
- layer_b3=[],
- layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("rhymes-ai/Allegro")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {
- "diffusers": "AutoencoderKLAllegro",
- },
- },
- file_256=["47871a698b18f92f15019d361a81cbc8af4676f8eef9a47fd2b95354a39f831a"],
- layer_b3=["93654cbab7541504d2377c66e72943c7fd9947fca2eb1be01bcc8877c322c1e0"],
- layer_256=["bfd496586118165a13243997101fc7cdd4f855b2d8a73ee2b771a4484c4c2f9f"],
- )
- )
- series, comp = tag_model_from_repo("cvssp/audioldm-s-full-v2")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {
- "diffusers": "AutoencoderKL",
- },
- },
- file_256=["42f64f7565b23eabde68c9694e39f18b8bba5f7a14f477e7ed4b51e0ea7de8a5"],
- layer_b3=["00959677dae940b9cfdbe5380c8cbb5a6b4951864cd26f8211d74a3d22b4f3de"],
- layer_256=["54d075953d5253a3abac651de070736c1d5510b857a8ab24c624304f428146b6"],
- )
- )
-
- series, comp = tag_model_from_repo("Efficient-Large-Model/Sana_1600M_1024px_BF16_diffusers")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="dc",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderDC"},
- },
- file_256=["15a4b09e56d95b768a0ec9da50b702e21d920333fc9b3480d66bb5c7fad9d87f"],
- layer_b3=["cf4ecc6697d18b0663e4eac58203f1dd6d9fb689cf99adfeadbc0019de0c73d0"],
- layer_256=["abfc39d1a6d71f03dde7bc40fec4a90478a97d17ae1688be9aad00e0512b9bde"],
- )
- )
- series, comp = tag_model_from_repo("stabilityai/stable-audio-open-1.0")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="oobleck",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderOobleck"},
- },
- # file_256=[],
- # layer_b3=[],
- # layer_256=[],
- )
- )
- series, comp = tag_model_from_repo("stable-video-diffusion-img2vid-xt")
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKLTemporalDecoder"},
- },
- # file_256=[],
- # layer_b3=[],
- # layer_256=[],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=sdxl_series,
- repo="madebyollin/sdxl-vae-fp16-fix",
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- file_256=[
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1", # modelspec sai
- "1b909373b28f2137098b0fd9dbc6f97f8410854f31f84ddc9fa04b077b0ace2c", # diffusers
- "78f6189c8492013e3cac81637a1f657f790a237387f8a9dfd6bfa5fee28eb646", # ssd1b diffusers
- "6353737672c94b96174cb590f711eac6edf2fcce5b6e91aa9d73c5adc589ee48", # ssd1b diffusers fp16
- "bcb60880a46b63dea58e9bc591abe15f8350bde47b405f9c38f4be70c6161e68", # kolors fp16
- "1598f3d24932bcfe6634e8b618ea1e30ab1d57f5aad13a6d2de446d2199f2341", # vega / lumina next sft d / auraflow
- "703abdcd7c389316b5128faa9b750a530ea1680b453170b27afebac5e4db30c4", # pixart a
- "98a14dc6fe8d71c83576f135a87c61a16561c9c080abba418d2cc976ee034f88", # hyd 1.1
- ],
- layer_b3=[
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74", # modelspec sai
- "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0", # diffusers
- # "245070a60a25ca080cb4951220c3fb1503da43829930d5f6f7a6770b491eafe1",
- # "50e65a628b5fe379798d8956e4a4e1d4b105c84b329f088d577f7f28c22abc49", # diffusers fp16 matches sd1
- ],
- layer_256=[
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da", # modelspec sai
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0", # diffusers
- # "35641f65ad7ea600cb931dcab556f7503279f1d8d99eda170fe7976d48502a2a", # diffusers fp16 matches sd1 (incorrect)
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=sdxl_series + sdxl_comp,
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- file_256=[
- "235745af8d86bf4a4c1b5b4f529868b37019a10f7c0b2e79ad0abca3a22bc6e1", # modelspec sai
- "27ed3b02e09638568e99d4398c67bc654dde04e6c0db61fb2d21dba630e7058a", # diffusers
- "eb6516ab7e1104d5d1a174a4d65c57835ae38061531d0a2192103aecfb790cc1", # diffusers fp16
- "e6bb9ea85bbf7bf6478a7c6d18b71246f22e95d41bcdd80ed40aa212c33cfeff", # modelspec sai vae 0.9
- ],
- layer_b3=[
- "bd5b356b509814025a9cf692710b87116d4fcd0e30a8232ed1db133e908d0e74", # modelspec sai
- # "9106380403dee83238af63ff1738396d2fdff9f6d78d0d9c1d0bf770ae4294d0", # diffusers
- # "245070a60a25ca080cb4951220c3fb1503da43829930d5f6f7a6770b491eafe1",
- # "50e65a628b5fe379798d8956e4a4e1d4b105c84b329f088d577f7f28c22abc49", # diffusers fp16 matches sd1
- ],
- layer_256=[
- "c9399a4cd39a180a0bb2af96a8297b9330541e090c21e83317cebb2f7cc651da", # modelspec sai
- "2240ae134a3b983abf45200c198f07e3d8068012fbbd2f658bbaa1fd6a0629c0", # diffusers
- # "35641f65ad7ea600cb931dcab556f7503279f1d8d99eda170fe7976d48502a2a", # diffusers fp16 matches sd1 (incorrect)
- ],
- )
- )
-
- repo = "shuttleai/shuttle-jaguar"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=tag_model_from_repo(repo)[0],
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- file_256=[
- "6fdfa2add4f04d94f36157cbb0197f97966b612e3f8eff4095315aefea74b904",
- ], # q8,
- layer_b3=[
- "0ebf9b7010accc44e219e355dd24bf1e3128004093c0c1dfc06f88c0a39fdbdd",
- "d0e7ef3c4af06fa08b4c0485a073e2df55f7b1e9e3ba8f7b261688bc562568f0", # q8
- ],
- layer_256=[
- "9b28f36873ea283905094a64e1ccb7cfc2b0f0aa166201d0ca63807ac37caa7b", # q8
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=dev_series,
- # no repo here, may conflict
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- file_256=[
- "afc8e28272cd15db3919bacdb6918ce9c1ed22e96cb12c4d5ed0fba823529e38", # dev
- "f5b59a26851551b67ae1fe58d32e76486e1e812def4696a4bea97f16604d40a3", # dev diffusers
- "8c717328c8ad41faab2ccfd52ae17332505c6833cf176aad56e7b58f2c4d4c94", # lumina2
- "8f53304a79335b55e13ec50f63e5157fee4deb2f30d5fae0654e2b2653c109dc", # sd3 turbo
- ],
- layer_b3=[
- "b6db93ed78c4a10d69e80831c1b8fbc1447f04e9b3d494889ee2056b98d41f17", # diffusers
- "a8a3ebdec4d7b38d65b7169d3604c19b587330e5e66f69ebf0ded56a24ec6903", # lumina2
- # "245070a60a25ca080cb4951220c3fb1503da43829930d5f6f7a6770b491eafe1",
- ],
- layer_256=[
- "7950e4f3897c75affaa5f9f3c51c88b4d9a27bfd9b05ad41c3f71d8c1c620b89",
- "79d2bfe93a2ac037cdc59ccb5576e32d00d75d4741fba49fc7e82b9724928216", # diffusers
- "8f084dc91fd5b481875bc9c86a4ef05e5f176896b7d31c6a5c2ce45c2e174004", # dev diffusers
- "322e01bd511e20bc2a3c27cd611f81ed85f0046b7c023b5622c2c9a5b8b34f80", # lumina2
- ],
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="eq",
- comp=sdxl_series,
- repo="KBlueLeaf/EQ-SDXL-VAE",
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- )
- )
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="ms-lc-eq",
- comp=sdxl_series,
- repo="Anzhc/MS-LC-EQ-D-VR_VAE",
- pkg={
- 0: {
- "diffusers": "AutoencoderKL",
- },
- },
- )
- )
- repo = "ucsd-reach/musicldm"
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=tag_model_from_repo(repo)[0],
- # no repo here, may conflict
- file_256=[
- "16e0c6c7c34e459c19500cc15cf538e6331db14969ea15917caa9b0966e44fd4",
- ], # q8,
- layer_b3=[
- "c5c32b3fb3e73799838836ccce27d883254254daecd10f86ba8ddc55214014e0",
- ],
- layer_256=[
- "1610c0ce39d1379091eb9ab2a4d14a8567e0f1a5dc6cca40fc0fa6f8e4e97c0f",
- ],
- )
- )
-
- mir_db.add(
- mir_entry(
- domain="info",
- arch="vae",
- series="kl",
- comp=sd1_series,
- pkg={
- 0: {"diffusers": "AutoencoderKL"},
- },
- file_256=[
- "0b204ad0cae549e0a7e298d803d57e36363760dec71c63109c1da3e1147ec520", # ckpt ema original ema pruned
- "95f26a5ab04779d5467d1fcecaf93160ffa523afe399b835b3e1bb77ff2d937a", # safetensors ema original ema pruned
- "32db726da04f06c1b6b14c0043ce115cc87a501482945c5add89a40d838fcb46", # safetensors ema diffusers
- "c6a580b13a5bc05a5e16e4dbb80608ff2ec251a162311590c1f34c013d7f3dab", # ckpt mse original ema pruned
- "735e4c3a447a3255760d7f86845f09f937809baa529c17370d83e4c3758f3c75", # safetensors mse original ema pruned
- "a1d993488569e928462932c8c38a0760b874d166399b14414135bd9c42df5815", # safetensors mse diffusers
- "a2b5134f4dbc140d9c11f11cba3233099e00af40f262f136c691fb7d38d2194c", # safetensors diffusers
- "4fbcf0ebe55a0984f5a5e00d8c4521d52359af7229bb4d81890039d2aa16dd7c", # safetensors fp16 diffusers
- ],
- layer_b3=[
- "82e2dc440a23d78bb91df8c9fce069a8512da51f8f54ea29e3431f545808171e", # safetensors original
- "2230487833925a104bee96e7ecfebaa4c3c43cc426c7a5b863f2584313dd4833", # safetensors diffusers
- ],
- layer_256=[
- "e43f3a227b5ecb43a6272fa92ed6011d2e9abcadadd1032dfa7ea7f875f9d5bd", # safetensors original
- "2494154245becf98891be884f943276aa3f54e9b3f0ea1042903fc15fba488f3", # safetensors diffusers
- ],
- )
- )
diff --git a/mir/_deprecated/_extras.py b/mir/_deprecated/_extras.py
deleted file mode 100644
index 39af779..0000000
--- a/mir/_deprecated/_extras.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from typing import Callable, Dict, List, Optional, Union
-
-from mir import NFO
-from mir.generate.from_module import import_object_named, show_path_for
-from mir.generate.tasks import TaskAnalyzer
-
-
-def _class_parent(code_name: str, pkg_name: str) -> Optional[List[str]]:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param code_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
- import os
- from importlib import import_module
-
- pkg_paths = {
- "diffusers": "pipelines",
- "transformers": "models",
- }
- folder_name = code_name.replace("-", "_")
- pkg_name = pkg_name.lower()
- folder_path = pkg_paths[pkg_name]
- package_obj = import_module(pkg_name)
- folder_path_named = [folder_path, folder_name]
- pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
- # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
- if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
- import_path = [pkg_name]
- import_path.extend(folder_path_named)
- return import_path
-
-
-def _trace_classes(pipe_class: str, pkg_name: str) -> Dict[str, List[str]]:
- """Retrieve all compatible pipe forms\n
- NOTE: Mainly for Diffusers
- :param pipe_class: Origin pipe
- :param pkg_name: Dependency package
- :return: A dictionary of pipelines"""
-
- related_pipes = []
- code_name = show_path_for(pipe_class, pkg_name)
- if pkg_name == "diffusers":
- related_pipe_class_name = pipe_class
- else:
- related_pipe_class_name = None
- related_pipes: list[str] = TaskAnalyzer.show_diffusers_tasks(code_name=code_name, class_name=related_pipe_class_name)
- # for i in range(len(auto_tasks)):
- # auto_tasks.setdefault(i, revealed_tasks[i])
- parent_folder = class_parent(code_name, pkg_name)
- if pkg_name == "diffusers":
- pkg_folder = import_object_named(parent_folder[0], ".".join(parent_folder))
- else:
- pkg_folder = import_object_named("__init__", ".".join(parent_folder[:-1]))
- if hasattr(pkg_folder, "_import_structure"):
- related_pipes.extend(next(iter(x)) for x in pkg_folder._import_structure.values())
- related_pipes = set(related_pipes)
- related_pipes.update(tuple(x) for x in _extract_inherited_classes(model_class=pipe_class, pkg_name=pkg_name))
- return related_pipes
-
-
-def _show_shared_hyperparameters(parameter_filter: Optional[str] = None) -> List[str]:
- """Show all config classes in the Transformer package with the specified init annotation\n
- :param from_match: Narrow the classes to only those with an exact key inside
- :return: A list of all Classes"""
- from mir.config.constants import extract_init_parameters
- from mir.inspect.metadata import find_transformers_classes
-
- transformers_data = find_transformers_classes()
- config_data = []
- for entry in transformers_data:
- if parameter_filter:
- segments = extract_init_parameters(module=entry.config, package_name="transformers")
- if parameter_filter in list(segments):
- config_data.append(entry.config)
- else:
- config_data.append(entry.config)
- return config_data
-
-
-def _get_class_parent_folder(class_name: str, pkg_name: str) -> List[str]:
- """Retrieve the folder path within a class. Only returns if it is a valid path in the system (formerly seek_class_path)\n
- ### NOTE: in most cases `__module__` makes this redundant
- :param class_name: The internal name for the model in the third-party API.
- :param pkg_name: The API Package
- :return: A list corresponding to the path of the model, or None if not found
- :raises KeyError: for invalid pkg_name
- """
- from mir.config.console import dbuq
- from mir.config.constants import extract_init_parameters
- from mir.inspect.classes import resolve_code_names
-
- pkg_name = pkg_name.lower()
- if pkg_name == "diffusers":
- parent_folder: List[str] = resolve_code_names(class_name=class_name, pkg_name=pkg_name, path_format=True)
- if not parent_folder or not parent_folder[-1].strip():
- dbuq("Data not found for", " class_name = {class_name},pkg_name = {pkg_name},{parent_folder} = parent_folder")
- return None
- elif pkg_name == "transformers":
- print(class_name)
- module_path = extract_init_parameters(class_name, "transformers")
- print(module_path)
- config = str(module_path.get("config"))
- print(config)
- config = config.split(": ")[-1].split(".")
- parent_folder = config[:3]
- return parent_folder
-
-
-def _class_to_mir_tag(mir_db: Dict[str, str], code_name: str) -> Optional[str]:
- """Converts a class identifier to its corresponding MIR tag.\n
- :param mir_db: A dictionary mapping series-compatibility pairs to their respective data.
- :param code_name: The Transformers class identifier to convert.
- :return: An optional list containing the series and compatibility if found, otherwise None."""
-
- from transformers.models.auto.modeling_auto import MODEL_MAPPING_NAMES
-
- from mir.config.constants import TEMPLATE
-
- template_data = TEMPLATE["arch"]["transformer"]
-
- for series, compatibility_data in mir_db.database.items():
- if any([template for template in template_data if template in series.split(".")[1]]):
- for compatibility, field_data in compatibility_data.items():
- if code_name == series.split(".")[2]:
- return [series, compatibility]
-
- class_name = MODEL_MAPPING_NAMES.get(code_name, False)
- if not class_name: # second pass without separators
- recoded_mapping = {code.replace("-", "").replace("_", ""): model for code, model in MODEL_MAPPING_NAMES.items()}
- class_name = recoded_mapping.get(code_name, False)
- if not class_name:
- return None
- pkg_data = field_data.get("pkg")
- if pkg_data:
- for _, pkg_type_data in pkg_data.items():
- maybe_class = pkg_type_data.get("transformers")
- if maybe_class == class_name:
- return [series, compatibility]
- return None
-
-
-def tag_transformers_model(repo_path: str, class_name: str, addendum: dict | None = None) -> tuple[str, str, str | dict[str, dict]]:
- """Convert model repo paths to MIR tags, classifying by feature\n
- :param name: Repo path
- :param class_name: The HF transformers class for the model
- :return: A segmented MIR tag useful for appending index entries"""
-
- from mir.config.constants import extract_init_parameters
-
- annotations = extract_init_parameters(class_name.replace("Model", "Config"), "transformers")
- if not annotations:
- class_name = class_name.replace("Config", "Model")
- annotations = extract_init_parameters(class_name, "transformers")
- if not annotations:
- raise TypeError("No mode type returned")
- if "Bert" in class_name:
- print(annotations)
- mir_prefix = mir_prefix_from_forward_pass(True, **annotations)
- base_series, base_comp = tag_model_from_repo(repo_path)
- if not addendum:
- return mir_prefix, base_series, base_comp
- else:
- mir_prefix = f"info.{mir_prefix}"
- return mir_prefix, base_series, {base_comp: addendum}
-
-
-# def extract_model_data(self,pipe_name, file_name: str) -> dict | None:
-# migrated_pipes = MIGRATIONS["migrated_pipes"]
-# pkg_path = f"diffusers.pipelines.{pipe_name}.{file_name}"
-# pipe_file: Callable = import_object_named(file_name, pkg_path) or import_module(pkg_path)
-# if pipe_file and (doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None)): #where pipe class and repo are
-# docstrings= DocStringEntry(package_name=pipe_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
-# DocStringParser(doc_string=docstrings.doc_string)
-# self.parsed_docs.pipe_repo = migrated_pipes.get(self.parsed_docs.pipe_class, self.parsed_docs.pipe_repo)
-# model = import_object_named(parsed_data.pipe_class, docstrings.pipe_module.__name__)
-# model_data = show_init_fields_for(model,"diffusers")
-# return {"model_params": model_data}
-
-
-# for pipe_name in IMPORT_STRUCTURE.keys():
-# if pipe_name not in exclusion_list and (import_name := getattr(diffusers_pipelines, str(pipe_name))):
-# file_specific = uncommon_naming.get(pipe_name, pipe_name)
-# file_names:list[str] = [getattr(import_name, "_import_structure", {})] or [f"pipeline_{file_specific}"]
-# for file_name in file_names:
-# if not file_name in exclusion_list or not (model_data := self.extract_model_data(pipe_name, file_name)):
-# continue
-# if not (prepared_data := PrepareData( **model_data)):
-# continue
-# else:
-# continue
-
-
-# def show_path_for(code_name: str, pkg_name: str) -> list[str] | str | None:
-# """Retrieve the folder path within a class. Only returns if it is a valid path in the system\n
-# ### NOTE: in most cases `__module__` makes this redundant
-# :param code_name: The internal name for the model in the third-party API.
-# :param pkg_name: The API Package
-# :return: A list corresponding to the path of the model, or None if not found
-# :raises KeyError: for invalid pkg_name
-# """
-
-# pkg_paths = {
-# "diffusers": "pipelines",
-# "transformers": "models",
-# }
-# folder_name = code_name.replace("-", "_")
-# pkg_name = pkg_name.lower()
-# folder_path = pkg_paths[pkg_name]
-# package_obj = import_module(pkg_name)
-# folder_path_named = [folder_path, folder_name]
-# pkg_folder = os.path.dirname(getattr(package_obj, "__file__"))
-# # dbuq(os.path.exists(os.path.join(pkg_folder, *folder_path_named)))
-# if os.path.exists(os.path.join(pkg_folder, *folder_path_named)) is True:
-# import_path = [pkg_name]
-# import_path.extend(folder_path_named)
-# return import_path
-
-
-# def get_internal_name_for(module_name: str | Type | None = None, pkg_name: str = "transformers", path_format: bool | None = False) -> list[str] | str | None:
-# """Reveal code names for class names from Diffusers or Transformers (formerly get code names)\n
-# :param class_name: To return only one class, defaults to None
-# :param pkg_name: optional field for library, defaults to "transformers"
-# :param path_format: Retrieve just the code name, or the full module path and code name within the package
-# :return: A list of all code names, or the one corresponding to the provided class"""
-# from mir.generate.diffusers import IMPORT_STRUCTURE
-# from mir.generate.transformers import MODEL_MAPPING_NAMES
-
-# package_imports = IMPORT_STRUCTURE if pkg_name == "diffusers" else MODEL_MAPPING_NAMES
-# pkg_name = pkg_name.lower()
-# MAPPING_NAMES: dict[str, str] = import_object_named(*package_imports[pkg_name])
-# if module_name:
-# if isinstance(module_name, Type):
-# module_name = module_name.__name__
-# code_name = next(iter(key for key, value in MAPPING_NAMES.items() if module_name in str(value)), "")
-# return show_path_for(code_name, pkg_name) if path_format else code_name.replace("_", "-")
-# return list(MAPPING_NAMES)
diff --git a/mir/_deprecated/_guiders.py b/mir/_deprecated/_guiders.py
deleted file mode 100644
index b791829..0000000
--- a/mir/_deprecated/_guiders.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-# def gen_guiders(mir_db: MIRDatabase): # upstream not quite ready for this yet
-# from nnll.metadata.helpers import snake_caseify
-# from diffusers.guider import GuiderType
-
-# guider_type = GuiderType
-# for comp_name in guider_type.items():
-# class_obj = comp_name.__name__
-# mir_data = {"pkg": {0: {"diffusers": class_obj}}}
-# try:
-# mir_db.add(
-# mir_entry(
-# domain="ops",
-# arch="noise_prediction",
-# series="guider",
-# comp=snake_caseify(class_obj),
-# **mir_data,
-# ),
-# )
-# except IndexError as error_log:
-# nfo(f"Failed to create compatibility: {class_obj}")
-# dbug(error_log)
-
-
-# (
-# "info.unet",
-# "stable-cascade",
-# {
-# "combined": {
-# "pkg": {
-# 0: { # decoder=decoder_unet
-# "precision": "ops.precision.bfloat.B16",
-# "generation": {
-# "negative_prompt": "",
-# "num_inference_steps": 20,
-# "guidance_scale": 4.0,
-# "num_images_per_prompt": 1,
-# "width": 1024,
-# "height": 1024,
-# },
-# },
-# "pkg_alt": {
-# 0: {
-# "diffusers": {
-# "StableCascadeCombinedPipeline": {
-# "negative_prompt": "",
-# "num_inference_steps": 10,
-# "prior_num_inference_steps": 20,
-# "prior_guidance_scale": 3.0,
-# }
-# },
-# }
-# },
-# }
-# }
-# },
-# ),
-
-
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-# def gen_attention_processors(mir_db: MIRDatabase): # upstream not quite ready for this yet
-# from diffusers.models.attention_processor import AttentionProcessor
-
-# mir_data
-# for series, comp_name in mir_data.items():
-# id_segment = series.split(".")
-# for compatibility in comp_name:
-# dbug(id_segment)
-# try:
-# mir_db.add(
-# mir_entry(
-# domain=id_segment[0],
-# arch=id_segment[1],
-# series=id_segment[2],
-# comp=compatibility,
-# **mir_data[series][compatibility],
-# ),
-# )
-# except IndexError as error_log:
-# nfo(f"Failed to create series: {series} compatibility: {comp_name} ")
-# dbug(error_log)
-
diff --git a/mir/_deprecated/_index.py b/mir/_deprecated/_index.py
deleted file mode 100644
index 813bcdd..0000000
--- a/mir/_deprecated/_index.py
+++ /dev/null
@@ -1,270 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-# import os
-# from importlib import import_module
-# from typing import Any, Generator
-
-# from mir import DBUQ, NFO
-# from mir.data import EXCLUSIONS
-# from mir.generate.diffusers import GET_TASK_CLASS, IMPORT_STRUCTURE, SUPPORTED_TASKS_MAPPINGS
-# from mir.generate.from_module import import_object_named, show_init_fields_for, to_domain_tag
-# from mir.generate.indexers import migrations
-
-
-# def retrieve_diffusers_docstrings(
-# package_name: str,
-# file_names: list[str],
-# ) -> Generator[DocStringEntry]:
-# """Yield (pkg, file, EXAMPLE_DOC_STRING) from a folder or a single file.\n
-# :param pkg_name: Package under ``diffusers.pipelines``.\n
-# :param file_names: A list of related file names.\n
-# :param use_folder: True → treat ``source`` as a folder with ``_import_structure``.\n
-# :return: DocString Entry class.\n
-# """
-
-# module_location: str | None = import_module("diffusers.pipelines").__file__
-# module_path = os.path.dirname(module_location)
-
-# for file_name in file_names:
-# assert isinstance(file_name, str), f"Expected path to be string, got {file_name} type {type(file_name)}"
-# if file_name == "pipeline_stable_diffusion_xl_inpaint":
-# continue
-
-# pkg_path = f"diffusers.pipelines.{package_name}.{file_name}"
-# DBUQ(pkg_path)
-
-# if os.path.exists(os.path.join(module_path, package_name, f"{file_name}.py")):
-# pipe_file = import_object_named(file_name, pkg_path) or import_module(pkg_path) or NFO(f"Failed to import {pkg_path}")
-# if doc_string := getattr(pipe_file, "EXAMPLE_DOC_STRING", None):
-# yield DocStringEntry(package_name=package_name, file_name=file_name, pipe_module=pipe_file, doc_string=doc_string)
-# else:
-# NFO(f"Doc string attribute missing for {package_name}/{file_name}")
-# else:
-# NFO(f"Path not found for {package_name}/{file_name}")
-
-# return
-
-
-# def create_pipe_entry(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
-# """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
-# :param repo_path (str): Repository path.
-# :param model_class_obj (str): The model class function
-# :raises TypeError: If 'repo_path' or 'class_name' are not set.
-# :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
-# """
-# import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
-
-# control_net = ["Control", "Controlnet"] #
-# mir_prefix = "info"
-# if hasattr(diffusers, class_name):
-# model_class_obj = getattr(diffusers, class_name)
-# sub_segments = show_init_fields_for(model_class_obj, "diffusers")
-# decoder = "decoder" in sub_segments
-# if repo_path in ["kandinsky-community/kandinsky-3"]:
-# mir_prefix = "info.unet"
-# if repo_path in ["openai/shap-e"]:
-# mir_prefix = "info.unet"
-# class_name = "ShapEPipeline"
-# elif class_name == "MotionAdapter":
-# mir_prefix = "info.lora"
-# elif class_name == "WanPipeline":
-# mir_prefix = "info.dit"
-# elif class_name == "CogVideoXVideoToVideoPipeline":
-# class_name = "CogVideoXPipeline"
-# elif any(maybe for maybe in control_net if maybe.lower() in class_name.lower()):
-# mir_prefix = "info.controlnet"
-# else:
-# mir_prefix = to_domain_tag(**sub_segments)
-# if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
-# NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
-# else:
-# mir_prefix = "info." + mir_prefix
-# if class_name == "StableDiffusion3InpaintPipeline" or repo_path in ["stabilityai/stable-diffusion-3-medium-diffusers"]:
-# class_name = "StableDiffusion3Pipeline"
-# repo_path = "stabilityai/stable-diffusion-3.5-medium"
-# if class_name == "HunyuanVideoFramepackPipeline" or repo_path in ["hunyuanvideo-community/HunyuanVideo"]:
-# class_name = "HunyuanVideoPipeline"
-# mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
-# mir_series = mir_prefix + "." + mir_series
-# repo_path = migrations(repo_path)
-# # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
-# prefixed_data = {
-# "repo": repo_path,
-# "pkg": {0: {"diffusers": class_name}},
-# # "mode": modalities.get("mode"),
-# }
-# return mir_series, {mir_comp: prefixed_data}
-
-
-# def tag_pipe(repo_path: str, class_name: str, addendum: dict) -> tuple:
-# """Convert model repo pipes to MIR tags, classifying by feature\n
-# :param name: Repo path
-# :param class_name: The HF Diffusers class for the model
-# :return: A segmented MIR tag useful for appending index entries"""
-# mir_series, mir_data = create_pipe_entry(repo_path=repo_path, class_name=class_name)
-# mir_prefix, mir_series = mir_series.rsplit(".", 1)
-# mir_comp = list(mir_data)[0]
-# return mir_prefix, mir_series, {mir_comp: addendum}
-
-
-# def find_diffusers_docstrings() -> Generator[list[DocStringEntry]]:
-# """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
-# :return: Docstrings for common diffusers models"""
-# import diffusers.pipelines as diffusers_pipelines
-
-# docstring_patterns = EXCLUSIONS
-# exclusion_list = docstring_patterns["exclusion_list"]
-# uncommon_naming = docstring_patterns["uncommon_naming"]
-# for pipe_name in IMPORT_STRUCTURE.keys():
-# if pipe_name not in exclusion_list:
-# file_specific = uncommon_naming.get(pipe_name, pipe_name)
-# if import_name := getattr(diffusers_pipelines, str(pipe_name)):
-# file_names = list(getattr(import_name, "_import_structure", {}).keys()) or [f"pipeline_{file_specific}"]
-# yield list(retrieve_diffusers_docstrings(pipe_name, file_names))
-# else:
-# continue
-
-
-# def diffusers_index() -> dict[str, dict[str, dict[str, Any]]]:
-# """Generate diffusion model data for MIR index\n
-# :return: Dictionary ready to be applied to MIR data fields
-# """
-# special_repos = {
-# "black-forest-labs/FLUX.1-schnell": "black-forest-labs/FLUX.1-dev",
-# # "stabilityai/stable-diffusion-3-medium-diffusers": "stabilityai/stable-diffusion-3.5-medium",
-# }
-# special_classes = {
-# # "StableDiffusion3Pipeline": "stabilityai/stable-diffusion-3.5-medium", # NOT sd3
-# "HunyuanDiTPipeline": "tencent-hunyuan/hunyuandiT-v1.2-diffusers", # NOT hyd .ckpt
-# "ChromaPipeline": "lodestones/Chroma",
-# }
-# for class_name, swap_repo in special_classes.items():
-# if parsed_data.pipe_class == class_name:
-# parsed_data.pipe_repo = swap_repo
-# extracted_docstrings = find_diffusers_docstrings()
-# model_info = [extract for pipeline in extracted_docstrings for extract in pipeline]
-# pipe_data = {} # pipeline_stable_diffusion_xl_inpaint
-
-# for extracted in model_info:
-# parsed_data: DocParseData = parse_docs(extracted.doc_string)
-# if parsed_data is None:
-# print(f"Doc string not found in '{extracted.package_name}' in {extracted.file_name}")
-# continue
-# for class_name, swap_repo in special_classes.items():
-# if parsed_data.pipe_class == class_name:
-# parsed_data.pipe_repo = swap_repo
-# break
-# model_class_obj = import_object_named(parsed_data.pipe_class, extracted.pipe_module.__name__)
-# if not model_class_obj:
-# continue
-# try:
-# series, comp_data = create_pipe_entry(parsed_data.pipe_repo, parsed_data.pipe_class)
-# except TypeError:
-# pass # Attempt 1
-# if pipe_data.get(series):
-# if "img2img" in parsed_data.pipe_class.lower():
-# continue
-# pipe_data.setdefault(series, {}).update(comp_data)
-# special_conditions = special_repos | special_classes
-# if parsed_data.staged_class or parsed_data.pipe_repo in list(special_conditions):
-# test = special_conditions.get(parsed_data.pipe_repo)
-# if test:
-# staged_repo = test
-# parsed_data.staged_class = parsed_data.pipe_class
-# try:
-# series, comp_data = create_pipe_entry(
-# staged_repo if parsed_data.staged_repo else parsed_data.pipe_repo,
-# parsed_data.staged_class #
-# if parsed_data.staged_class
-# else parsed_data.pipe_class,
-# )
-# except TypeError as error_log:
-# NFO(series, comp_data)
-# NFO(error_log)
-# continue # Attempt 2,
-# pipe_data.setdefault(series, {}).update(comp_data)
-# return dict(pipe_data)
-
-
-# def pull_weight_map(repo_id: str, arch: str) -> Dict[str, str]:
-# from nnll.download.hub_cache import download_hub_file
-
-# model_file = download_hub_file(
-# repo_id=f"{repo_id}/tree/main/{arch}",
-# source="huggingface",
-# file_name="diffusion_pytorch_model.safetensors.index.json",
-# local_dir=".tmp",
-# )
-
-
-# @MODE_DATA.decorator
-# def add_mode_types(mir_tag: list[str], data: dict | None = None) -> dict[str, list[str] | str]:
-# """_summary_\n
-# :param mir_tag: _description_
-# :param data: _description_, defaults to None
-# :return: _description_"""
-# fused_tag = ".".join(mir_tag)
-
-# mir_details = {
-# "mode": data.get(fused_tag, {}).get("pipeline_tag"),
-# "pkg_type": data.get(fused_tag, {}).get("library_type"),
-# "tags": data.get(fused_tag, {}).get("tags"),
-# }
-# return mir_details
-
-
-# def generate_pipe_tag(repo_path: str, class_name: str, model_class_obj: Callable | None = None) -> tuple[str, dict[str, dict[Any, Any]]]:
-# """Create a pipeline article and generate corresponding information according to the provided repo path and pipeline category\n
-# :param repo_path (str): Repository path.
-# :param model_class_obj (str): The model class function
-# :raises TypeError: If 'repo_path' or 'class_name' are not set.
-# :return: Tuple: The data structure containing mir_series and mir_comp is used for subsequent processing.
-# """
-# import diffusers # pyright: ignore[reportMissingImports] # pylint:disable=redefined-outer-name
-
-# if hasattr(diffusers, class_name):
-# model_class_obj = getattr(diffusers, class_name)
-# sub_segments = show_init_fields_for(model_class_obj, "diffusers")
-
-# else:
-# mir_prefix = to_domain_tag(**sub_segments)
-# if mir_prefix is None and class_name not in ["AutoPipelineForImage2Image", "DiffusionPipeline"]:
-# NFO(f"Failed to detect type for {class_name} {list(sub_segments)}\n")
-# else:
-# mir_prefix = "info." + mir_prefix
-
-# mir_series, mir_comp = list(tag_model_from_repo(repo_path, decoder))
-# mir_series = mir_prefix + "." + mir_series
-# repo_path = migrations(repo_path)
-# # modalities = add_mode_types(mir_tag=[mir_series, mir_comp])
-# prefixed_data = {
-# "repo": repo_path,
-# "pkg": {0: {"diffusers": class_name}},
-# # "mode": modalities.get("mode"),
-# }
-# return mir_series, {mir_comp: prefixed_data}
-
-
-# def write_to_mir(new_data: dict, mir_db: MIRDatabase) -> None:
-# """Generate MIR HF Hub model database
-# :param new_data: Data for the MIR database
-# :param mir_database: MIRDatabase instance
-# """
-# for series, comp_name in new_data.items():
-# id_segment = series.split(".")
-# for compatibility in comp_name:
-# # dbug(id_segment)
-# try:
-# mir_db.add(
-# mir_entry(
-# domain=id_segment[0],
-# arch=id_segment[1],
-# series=id_segment[2],
-# comp=compatibility,
-# **new_data[series][compatibility],
-# ),
-# )
-# except IndexError: # as error_log:
-# NFO(f"Failed to create series: {series} compatibility: {comp_name} ")
-# # dbug(error_log)
diff --git a/mir/_deprecated/_schedulers.py b/mir/_deprecated/_schedulers.py
deleted file mode 100644
index e415427..0000000
--- a/mir/_deprecated/_schedulers.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-import re
-from importlib import import_module
-
-from mir.generate.diffusers import IMPORT_STRUCTURE
-from mir.maid import MIRDatabase
-from mir.spec import mir_entry
-
-
-def tag_scheduler(series_name: str) -> tuple[str, str]:
- """Create a mir label from a scheduler operation\n
- :param class_name: Known period-separated prefix and model type
- :return: The assembled mir tag with compatibility pre-separated"""
-
- comp_name = None
- patterns = [r"Schedulers", r"Multistep", r"Solver", r"Discrete", r"Scheduler"]
- for scheduler in patterns:
- compiled = re.compile(scheduler)
- match = re.search(compiled, series_name)
- if match:
- comp_name = match.group()
- comp_name = comp_name.lower()
- break
- for pattern in patterns:
- series_name = re.sub(pattern, "", series_name)
- series_name.lower()
- assert series_name is not None, "Expected series tag but got None"
- assert comp_name is not None, "Expected compatibility tag but got None"
- return series_name, comp_name
-
-
-def add_schedulers(mir_db: MIRDatabase):
- """Create mir info database"""
-
- for class_name in IMPORT_STRUCTURE["schedulers"]:
- if class_name != "SchedulerMixin":
- series_name, comp_name = tag_scheduler(class_name)
- class_obj = import_module("diffusers.schedulers")
- class_path = getattr(class_obj, class_name).__module__
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="scheduler",
- series=series_name,
- comp=comp_name.lower(),
- pkg={
- 0: {
- "diffusers": class_name,
- "module_path": class_path,
- },
- },
- )
- )
-
- class_name = "KarrasDiffusionSchedulers"
- series_name, comp_name = tag_scheduler(class_name)
- class_obj = import_module("diffusers.schedulers.scheduling_utils")
- class_path = getattr(class_obj, class_name).__module__
- mir_db.add(
- mir_entry(
- domain="ops",
- arch="scheduler",
- series=series_name,
- comp=comp_name,
- pkg={
- 0: {
- "diffusers": class_name,
- "module_path": class_path,
- },
- },
- ),
- )
diff --git a/mir/generate/_tasks.py b/mir/generate/_tasks.py
index 5c746ef..32961ae 100644
--- a/mir/generate/_tasks.py
+++ b/mir/generate/_tasks.py
@@ -4,7 +4,6 @@
from typing import Any, Callable, List
from mir.generate.diffusers.raw_data import DPrepareData
-from mir.generate.diffusers.schedulers import tag_scheduler
from mir import DBUQ
from mir.tag import MIRTag
@@ -88,6 +87,7 @@ async def tag_class(self, pipe_class: Callable, pipe_role: str, series: str) ->
:param pipe_role: Role of the class in the pipeline
:param series: Series identifier for the component
:return: Tuple containing MIR tag and class name"""
+ from mir.generate.diffusers.schedulers import tag_scheduler
mir_tag = None
class_name = pipe_class.__name__
diff --git a/mir/generate/diffusers/harvest.py b/mir/generate/diffusers/harvest.py
index db40d91..e0a697c 100644
--- a/mir/generate/diffusers/harvest.py
+++ b/mir/generate/diffusers/harvest.py
@@ -3,92 +3,46 @@
from importlib import import_module
from inspect import getmro
-from typing import Any, Callable, get_type_hints
+from typing import get_type_hints
from mir.generate.diffusers.raw_data import DPrepareData
-from mir.package import MIRNesting, MIRPackage
-from mir.tag import MIRTag
-class HarvestClasses:
+class HarvestLoop:
def __init__(self) -> None:
"""Initializes the HarvestClasses instance with an empty list to store raw class data."""
+ from mir.generate.transformers.harvest import HarvestLoop
+
from mir.maid import MIRDatabase
self.db = MIRDatabase()
- self.raw_data = []
- self.find_diffusers_docstrings()
-
- def find_diffusers_docstrings(self) -> None:
- """Pull down docstrings from 🤗Diffusers pipelines, minimizing internet requests\n
- :return: Docstrings for common diffusers models"""
-
- # from mir.generate.tasks import TaskAnalyzer
+ self.harvest_tf = HarvestLoop()
- subclasses = self.extract_subclass_data("diffusers", "DiffusionPipeline")
- for module_path, model in subclasses.items():
- if not (base_data := self.extract_base_data(module_path)):
- continue
- if not (model_data := self.extract_model_class_data(model)):
- continue
- if not (prepared_data := DPrepareData(**base_data, **model_data)):
- continue
- mir_tag = MIRTag(prepared_data)
- # task_analysis = TaskAnalyzer(prepared_data=prepared_data, mir_tag=mir_tag)
- mir_nest = MIRNesting(mir_tag, prepared_data)
- packages = {"model": MIRPackage(data=prepared_data.model)}
- for component_name, component_model in prepared_data.model_params.items():
- if hasattr(prepared_data, component_name):
- packages.setdefault(component_name, MIRPackage(data=component_model))
- packages.setdefault("framework", MIRPackage(data=mir_nest.framework_data))
- # print(packages)
- mir_nest(packages)
-
- self.db.add_data(mir_nest, *mir_nest.loops)
-
- def extract_base_data(self, module_path: str) -> dict[str, str] | None:
+ def __call__(self) -> None:
from mir.data import EXCLUSIONS
- if module_path.rsplit(".", 1)[-1] in EXCLUSIONS["exclusion_list"]:
- return None
- base_path = module_path.rsplit(".", 1)[0]
- model_path = import_module(base_path)
- if doc_string := getattr(model_path, "EXAMPLE_DOC_STRING", None):
- return {
- "doc_string": doc_string,
- "model_path": base_path,
- }
- return None
-
- def extract_model_class_data(self, model: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
- model_name: str = model.__name__
- library: str = model.__module__.split(".", 1)[0]
- model_params: dict[str, Any] = get_type_hints(model.__init__)
- for module in model_params.values():
- module_name = module.__module__
- library_path = f"{library}.models."
- if library_path in module_name:
- module_name = module_name.replace(library_path, "").split(".")[0]
- return {
- "model": model,
- "model_name": model_name,
- "model_params": model_params,
- "library": library,
- }
- return None
+ prepared_data = {}
+ library = "diffusers"
+ subclasses = self.extract_subclass_data(library, "DiffusionPipeline") # diffusers.pipelines.
+ for module_path, pipeline in subclasses.items():
+ if module_path.rsplit(".", 1)[-1] not in EXCLUSIONS["exclusion_list"]:
+ loop_parameters = get_type_hints(pipeline.__init__)
+ loop_parameters.setdefault("pipeline", pipeline)
+ for name, self.model in loop_parameters.items():
+ if prepare_data := self.prepare_class_data():
+ prepared_data.setdefault(name, prepare_data)
+ for data in prepared_data:
+ pass
+
+ def prepare_class_data(self):
+ prepared_data = DPrepareData(model=self.model)
+ return prepared_data
def extract_subclass_data(self, package_name: str, base_class_name: str):
- """
- Return a dict mapping `.` → class object
+ """Return a dict mapping `.` → class object
for every class in `package_name` that subclasses a class named
- `base_class_name`.
+ `base_class_name`."""
- The implementation is intentionally defensive: it avoids
- triggering `__getattr__` on lazy‑loaded submodules that might
- raise a `RuntimeError`. Instead of `inspect.getmembers`, it
- iterates over the module's `__dict__` which contains only
- attributes that have already been imported.
- """
from pkgutil import walk_packages
results = {}
diff --git a/mir/generate/diffusers/package.py b/mir/generate/diffusers/package.py
new file mode 100644
index 0000000..9c08f39
--- /dev/null
+++ b/mir/generate/diffusers/package.py
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from types import ModuleType
+from typing import Callable
+from dataclasses import dataclass, field
+
+
+@dataclass
+class MIRPackage:
+ model_type: str
+ model: Callable | str | dict[str, str]
+ model_path: ModuleType
+ package: dict[str, str] = field(init=False, default_factory=dict[str, str])
+
+ def __post_init__(self):
+ self.package = {}
+ self.model_name: str = self.model.__name__
+ self.model_path: ModuleType = self.model.__module__
+        if not isinstance(self.model, dict):
+ self.generate_package()
+ self.generate_repo()
+
+ def generate_repo(self):
+ from mir.data import MIGRATIONS
+
+ if self.model_type in ["unet", "transformer"] and (doc_string := getattr(self.model_path, "EXAMPLE_DOC_STRING", None)):
+ if repo := MIGRATIONS["migrated_pipes"].get(self.model_name, False):
+ self.repo = repo
+ elif self.model_type not in ["scheduler", "vae", "tokenizer"]:
+ self.process_doc_string(doc_string=doc_string)
+
+ def generate_package(self) -> None:
+ """Generates package information for the MIR tag based on class.
+ :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
+ model = f"{self.model_path}.{self.model_name}"
+ self.package: dict[str, str] = {"model": model}
+
+ def config_to_repo(self, config_class: Callable) -> str | None:
+ """Extracts the repository path from the configuration class documentation.\n
+ :param config_class: Configuration class to extract repository path from.
+ :return: Repository path as a string if found, otherwise None."""
+ import re
+
+ from mir import NFO
+
+ doc_check = [config_class]
+ if hasattr(config_class, "forward"):
+ doc_check.append(config_class.forward) # type: ignore
+ for pattern in doc_check:
+ doc_string = pattern.__doc__
+ matches = re.findall(r"\[([^\]]+)\]", doc_string) # type: ignore
+ if matches:
+ try:
+ return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
+ except StopIteration as error_log:
+ NFO(f"ERROR >>{matches} : LOG >> {error_log}")
+ continue
+
+ def process_doc_string(self, doc_string: str) -> None:
+ from mir.generate.diffusers.doc_parse import DocStringParser
+
+ doc_parser = DocStringParser(doc_string=doc_string, model=self.model, model_path=self.model_path)
+ doc_parser.parse()
+ if repo_path := doc_parser.pipe_repo:
+ self.repo_path = repo_path
+ if staged_repo := doc_parser.staged_repo:
+ self.staged_repo = staged_repo
diff --git a/mir/generate/diffusers/raw_data.py b/mir/generate/diffusers/raw_data.py
index e86dbfb..26170e8 100644
--- a/mir/generate/diffusers/raw_data.py
+++ b/mir/generate/diffusers/raw_data.py
@@ -3,55 +3,22 @@
from dataclasses import dataclass, field
-from typing import Callable, get_type_hints
+from typing import Callable
@dataclass
class DPrepareData:
- doc_string: str
+ """Represents a structured entry of the name of the class and its associated attributes."""
+
model: Callable
- model_path: str
- library: str
- model_name: str
model_params: dict[str, list[str]] = field(init=True, default_factory=lambda: {"": [""]})
- repo_path: str = field(init=False, default_factory=str)
- staged_repo: str | None = field(init=False, default_factory=str)
- tasks: list[str] = field(init=False, default_factory=lambda: [""])
- name: str = field(init=False, default_factory=str)
-
- def __post_init__(self) -> None:
- from mir.data import MIGRATIONS
- from mir.generate.diffusers.doc_parse import DocStringParser
-
- doc_parser = DocStringParser(doc_string=self.doc_string, model=self.model, model_path=self.model_path)
- doc_parser.parse()
- if repo_path := MIGRATIONS["migrated_pipes"].get(self.model.__name__, False):
- self.repo_path = repo_path
- else:
- if repo_path := doc_parser.pipe_repo:
- self.repo_path = repo_path
- if staged_repo := doc_parser.staged_repo:
- self.staged_repo = staged_repo
- self.show_diffusers_tasks()
- for name, model in self.model_params.items():
- setattr(self, name, model)
- print(name, model)
-
- def show_diffusers_tasks(self) -> None:
- """Return Diffusers task pipes based on package-specific query\n
- :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
- :param code_name: To find task pipes from a Transformers class pipe, defaults to None
- :return: A list of alternate class pipelines derived from the specified class"""
- from mir.generate.diffusers import SUPPORTED_TASKS_MAPPINGS, GET_TASK_CLASS
- alt_tasks = set({})
- self.internal_name = self.model_path.rsplit(".", 2)[-1]
- for task_map in SUPPORTED_TASKS_MAPPINGS:
- task_class = GET_TASK_CLASS(task_map, self.model, False)
- if task_class:
- alt_tasks.add(task_class.__name__)
- for model_code, pipe_class_obj in task_map.items():
- if self.internal_name in model_code:
- alt_tasks.add(pipe_class_obj.__name__)
+ model_name: str = field(init=False)
+ library: str = field(init=False)
+ import_path: str = field(init=False)
- self.tasks = [x for x in alt_tasks]
+ def __post_init__(self):
+ """Initializes the DPrepareData instance by setting derived attributes."""
+ self.model_name: str = self.model.__name__
+ self.import_path: str = self.model.__module__.rsplit(".", 1)[0]
+ self.library: str = self.import_path.split(".")[0]
diff --git a/mir/generate/diffusers/tasks.py b/mir/generate/diffusers/tasks.py
new file mode 100644
index 0000000..068b126
--- /dev/null
+++ b/mir/generate/diffusers/tasks.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from dataclasses import dataclass, field
+from typing import Callable
+
+
+@dataclass
+class CollectTasks:
+ model: Callable
+ import_path: str
+ tasks: list[str] = field(init=False)
+
+ def __post_init__(self) -> None:
+ self.model_to_tasks()
+
+ def model_to_tasks(self) -> None:
+ """Return Diffusers task pipes based on package-specific query\n
+ :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
+ :param code_name: To find task pipes from a Transformers class pipe, defaults to None
+ :return: A list of alternate class pipelines derived from the specified class"""
+ from mir.generate.diffusers import SUPPORTED_TASKS_MAPPINGS, GET_TASK_CLASS
+
+ alt_tasks = set({})
+ self.internal_name = self.import_path.rsplit(".", 2)[-1]
+ for task_map in SUPPORTED_TASKS_MAPPINGS:
+ task_class = GET_TASK_CLASS(task_map, self.model, False)
+ if task_class:
+ alt_tasks.add(task_class.__name__)
+ for model_code, pipe_class_obj in task_map.items():
+ if self.internal_name in model_code:
+ alt_tasks.add(pipe_class_obj.__name__)
+
+ self.tasks = [x for x in alt_tasks]
diff --git a/mir/generate/transformers/harvest.py b/mir/generate/transformers/harvest.py
index 90de8f6..5b8525d 100644
--- a/mir/generate/transformers/harvest.py
+++ b/mir/generate/transformers/harvest.py
@@ -1,108 +1,44 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from typing import Any, Callable
+from typing import Callable
-from mir.package import MIRNesting, MIRPackage
from mir.generate.transformers.raw_data import PrepareData
-from mir.tag import MIRTag
-class HarvestClasses:
+class HarvestLoop:
def __init__(self) -> None:
"""Initializes the HarvestClasses instance with an empty list to store raw class data."""
from mir.maid import MIRDatabase
self.db = MIRDatabase()
- self.find_transformers_classes()
- def find_transformers_classes(self) -> None:
- """Finds and collects PrepareData entries for all transformer classes defined in AUTO_MAP.\n
- :return: List of PrepareData entries representing the transformer classes."""
+ def __call__(self) -> None:
from mir.generate.transformers import AUTO_MAP
-
- for config_class, model_class in AUTO_MAP.items(): # type: ignore
- if isinstance(model_class, tuple):
- model_class: Callable = model_class[0]
- if not (config_data := self.extract_config_class_data(config_class)):
- continue
- if not (model_data := self.extract_model_class_data(model_class)):
- continue
- if not (prepared_data := PrepareData(**config_data, **model_data)): # type:ignore , _Lazyautomapping tuple
- continue
-
- mir_tag = MIRTag(prepared_data)
- mir_nest = MIRNesting(mir_tag, prepared_data)
-
- packages = {"model": MIRPackage(data=prepared_data.model)}
- if hasattr(prepared_data, "tokenizer") and prepared_data.tokenizer:
- packages.setdefault("tokenizer", MIRPackage(data=prepared_data.tokenizer)) # type: ignore , _Lazyautomapping tuple
- packages.setdefault("framework", MIRPackage(data=mir_nest.framework_data))
- mir_nest(packages)
-
- self.db.add_data(mir_nest, *mir_nest.loops)
-
- def extract_config_class_data(self, config_class: Callable) -> dict[str, str | Callable | dict[str, Any]] | None:
- """Extracts information from config classes.\n
- :param config_class: Model class or callable returning model classes.
- :return: dictionary of discovered elements"""
- from mir.data import MIGRATIONS, PARAMETERS
+ from mir.generate.transformers import TOKENIZER_MAPPING
+
+ prepared_data = {}
+ for config_class, model_data in AUTO_MAP.items():
+ assert isinstance(config_class, Callable)
+ loop_parameters = {"model": (model_data, config_class)}
+ if tokenizer := TOKENIZER_MAPPING.get(config_class, None):
+ loop_parameters.setdefault("tokenizer", (tokenizer, tokenizer)) # type: ignore
+ for name, (self.model, self.config) in loop_parameters.items():
+ if prepare_data := self.prepare_class_data(): # type: ignore
+ prepared_data.setdefault(name, prepare_data)
+ for data in prepared_data:
+ pass
+
+ def prepare_class_data(self) -> PrepareData | None:
+ """Extract and collect information from model and config classes.\n
+ :return: A PrepareData entry representing the transformer class."""
+ from mir.data import PARAMETERS
from mir.generate.from_module import show_init_fields_for
- config_name = config_class.__name__
- config_params = PARAMETERS.get(config_name, {})
- if not config_params:
- config_params = show_init_fields_for(config_class)
- repo_path = MIGRATIONS["config"].get(config_name, {})
- if not repo_path:
- repo_path = self.config_to_repo(config_class)
- if not repo_path or not config_params:
- return None
- elif "inspect" in config_params or "deprecated" in config_params:
- return None
- return {
- "name": config_name,
- "config": config_class,
- "config_params": config_params,
- "repo_path": repo_path,
- }
-
- def extract_model_class_data(self, model_class: Callable) -> dict[str, str | Any] | None:
- """Extracts information from model classes.\n
- :param model_class: Model class or callable returning model classes.
- :return: dictionary of discovered elements"""
- from mir.generate.from_module import show_init_fields_for # Ensure it's a tuple for consistency.
-
- model_data: dict[str, str | Any] = {"model": model_class}
- model_params = show_init_fields_for(model_class)
- if "inspect" in model_params or "deprecated" in model_params:
+ config_name = self.config.__name__
+ config_params = PARAMETERS.get(config_name, show_init_fields_for(self.config))
+ if any(x in config_params for x in ["inspect", "deprecated"]):
return None
- else:
- return model_data | {
- "model_params": model_params,
- }
-
- def config_to_repo(self, config_class: Callable) -> str | None:
- """Extracts the repository path from the configuration class documentation.\n
- :param config_class: Configuration class to extract repository path from.
- :return: Repository path as a string if found, otherwise None."""
- import re
-
- from mir import NFO
-
- doc_check = [config_class]
- if hasattr(config_class, "forward"):
- doc_check.append(config_class.forward) # type: ignore
- for pattern in doc_check:
- doc_string = pattern.__doc__
- matches = re.findall(r"\[([^\]]+)\]", doc_string) # type: ignore
- if matches:
- try:
- return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
- except StopIteration as error_log:
- NFO(f"ERROR >>{matches} : LOG >> {error_log}")
- continue
-
-
-if __name__ == "__main__":
- HarvestClasses()
+        if isinstance(self.model, tuple):
+            self.model = self.model[0]
+        return PrepareData(model=self.model, config=self.config, config_params=config_params)  # type: ignore
diff --git a/mir/generate/transformers/package.py b/mir/generate/transformers/package.py
new file mode 100644
index 0000000..148aded
--- /dev/null
+++ b/mir/generate/transformers/package.py
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+
+from typing import Callable; from types import ModuleType
+from dataclasses import dataclass, field
+
+
+@dataclass
+class MIRPackage:
+ config: Callable
+ model: Callable
+ package: dict[str, str] = field(init=False, default_factory=dict[str, str])
+
+ def __post_init__(self):
+ self.package = {}
+ self.model_name: str = self.model.__name__
+ self.model_path: ModuleType = self.model.__module__
+ if not isinstance(self.config, dict):
+ self.generate_package()
+ self.generate_repo()
+
+ def generate_repo(self) -> None:
+ from mir.data import MIGRATIONS
+
+ if repo := MIGRATIONS["config"].get(self.config.__name__, {}):
+ self.repo = repo
+ else:
+            self.repo = self.config_to_repo()
+
+ def generate_package(self) -> None:
+ """Generates package information for the MIR tag based on class.
+ :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
+        model = f"{self.model_path}.{self.model_name}"
+ self.package: dict[str, str] = {"model": model}
+
+ def config_to_repo(self) -> str | None:
+ """Extracts the repository path from the configuration class documentation.\n
+ :param config_class: Configuration class to extract repository path from.
+ :return: Repository path as a string if found, otherwise None."""
+ import re
+
+ from mir import NFO
+
+ doc_check = [self.config]
+ if hasattr(self.config, "forward"):
+ doc_check.append(self.config.forward) # type: ignore
+ for pattern in doc_check:
+ doc_string = pattern.__doc__
+ matches = re.findall(r"\[([^\]]+)\]", doc_string) # type: ignore
+ if matches:
+ try:
+ return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
+ except StopIteration as error_log:
+ NFO(f"ERROR >>{matches} : LOG >> {error_log}")
+ continue
diff --git a/mir/generate/transformers/raw_data.py b/mir/generate/transformers/raw_data.py
index 0664c45..02bbd11 100644
--- a/mir/generate/transformers/raw_data.py
+++ b/mir/generate/transformers/raw_data.py
@@ -1,47 +1,24 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-
+from typing import Callable
from dataclasses import dataclass, field
-from typing import Callable, Any
@dataclass
class PrepareData:
"""Represents a structured entry of the name of the class and its associated attributes."""
- name: str
model: Callable
- config: type
- repo_path: str
config_params: dict[str, list[str]]
- model_params: dict[str, list[str]] | None = field(init=True, default_factory=lambda: {"": [""]})
- tasks: list[str] = field(init=False, default_factory=lambda: [""])
+ config: Callable | None = None
+
+ model_name: str = field(init=False)
+ library: str = field(init=False)
+ import_path: str = field(init=False)
def __post_init__(self) -> None:
"""Initializes the PrepareData instance by setting derived attributes."""
- from mir.generate.transformers import REVERSE_MAP, TOKENIZER_MAPPING
-
- self.model_name: str = self.model.__name__.split(".")[-1]
- if tokenizer := TOKENIZER_MAPPING.get(self.config, None):
- self.tokenizer: tuple[type[Any] | None, type[Any] | None] = tokenizer
- if internal_name := REVERSE_MAP.get(self.config):
- self.internal_name = internal_name
- self.library = self.model.__module__.split(".")[0]
- self.model_to_tasks()
-
- def model_to_tasks(self) -> None:
- """Transform a single model class into derivative classes for specific tasks.\n
- :return: A list of task classes associated with the model."""
- from pathlib import Path
- from importlib import import_module
-
- import_path = Path(self.model.__module__).stem
- parent_module = import_module(import_path)
- self.tasks = []
- if hasattr(parent_module, "__all__") and parent_module.__name__ != "DummyPipe":
- for module in parent_module.__all__:
- if (module.lower() != module) and (module != self.model_name) and (module != self.config.__name__):
- self.tasks.append(module)
- else:
- self.tasks = [self.model.__name__]
+ self.model_name: str = self.model.__name__
+ self.import_path = self.model.__module__.rsplit(".", 1)[0]
+ self.library = self.import_path.split(".")[0]
diff --git a/mir/generate/transformers/tasks.py b/mir/generate/transformers/tasks.py
new file mode 100644
index 0000000..be9cf81
--- /dev/null
+++ b/mir/generate/transformers/tasks.py
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
+#
+
+from dataclasses import dataclass, field
+from typing import Callable
+
+
+@dataclass
+class CollectTasks:
+ model: Callable
+ import_path: str
+ config: Callable
+ tasks: list[str] = field(init=False)
+
+ def __post_init__(self) -> None:
+ self.model_to_tasks()
+
+ def model_to_tasks(self) -> None:
+ """Transform a single model class into derivative classes for specific tasks.\n
+ :return: A list of task classes associated with the model."""
+ from importlib import import_module
+
+ model_name = self.model.__name__
+
+ parent_module = import_module(self.import_path)
+ self.tasks = []
+ if hasattr(parent_module, "__all__") and parent_module.__name__ != "DummyPipe":
+ for module in parent_module.__all__:
+ if (module.lower() != module) and (module != model_name) and (module != self.config.__name__):
+ self.tasks.append(module)
+ else:
+ self.tasks = [model_name]
diff --git a/mir/package.py b/mir/package.py
index 97187e9..7269fab 100644
--- a/mir/package.py
+++ b/mir/package.py
@@ -8,31 +8,6 @@
from mir.tag import MIRTag
-@dataclass
-class MIRPackage:
- data: Callable | str | dict[str, str]
- package: dict[str, str] = field(init=False, default_factory=dict[str, str])
-
- def __init__(self, data: Callable | str | dict[str, str] | dict[str, Any]):
- self.package = {}
- self.data = data
- if not isinstance(self.data, dict):
- self.generate_package()
- else:
- self.add_framework(self.data)
-
- def generate_package(self) -> None:
- """Generates package information for the MIR tag based on class.
- :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
- self.domain = "ops"
- model = f"{self.data.__module__}.{self.data.__name__}"
- self.package: dict[str, str] = {"model": model}
-
- def add_framework(self, framework_data) -> None:
- self.domain = "info"
- self.package = framework_data
-
-
class MIRNesting:
"""Build tag components from the extracted data\n
:param mir_tag: An instance of MIR tag with the necessary information
@@ -55,7 +30,7 @@ def __init__(self, mir_tag: MIRTag, prepared_data: PrepareData | DPrepareData) -
self.loops = []
self.framework_data = {}
- def __call__(self, packages: dict[str, MIRPackage]) -> None:
+ def __call__(self, packages: MIRPackage) -> None:
"""Common routine for handling a package: store tag data, nest the package,
and record the name of the newly-created attribute.\n
:param name: Identification string to store data underneath
@@ -64,6 +39,7 @@ def __call__(self, packages: dict[str, MIRPackage]) -> None:
for name, mir_package in packages.items():
is_framework = name == "framework"
is_model = name == "model"
+ is_tokenizer = name == "tokenizer"
if is_framework:
package_data = {self.prepared_data.library: mir_package.package}
diff --git a/mir/tag.py b/mir/tag.py
index 82f7b8b..4d49e83 100644
--- a/mir/tag.py
+++ b/mir/tag.py
@@ -2,9 +2,9 @@
#
from dataclasses import dataclass, field
-
-from mir.generate.transformers.raw_data import PrepareData
-from mir.generate.diffusers.raw_data import DPrepareData
+from typing import Callable
+# from mir.generate.transformers.raw_data import PrepareData
+# from mir.generate.diffusers.raw_data import DPrepareData
@dataclass
@@ -18,20 +18,30 @@ class MIRTag:
comp The compatibility component of the MIR tag (generated, optional).
"""
- raw_data: PrepareData | DPrepareData
+ domain: str = field(init=False)
arch: str = field(init=False)
series: str = field(init=False)
decoder: bool = False
def __post_init__(self) -> None:
"""Initializes MIRTag instance, setting up database connection and generating package and MIR tag information."""
+ self.generate_domain()
self.generate_arch()
self.generate_series_and_comp(repo_path=self.raw_data.repo_path)
+ if hasattr(self, "comp"):
+ self.flat = f"{self.domain}.{self.arch}.{self.series}.{self.comp}"
+ else:
+ self.flat = f"{self.domain}.{self.arch}.{self.series}"
+
+ def generate_domain(self) -> None:
+ if isinstance(self.raw_data.model, Callable):
+ self.domain = "ops"
+ else:
+ self.domain = "info"
def generate_arch(self) -> None:
"""Generates the architecture part of the MIR tag based on prepared data.\n
:raises ValueError: If no suitable tag can be determined."""
-
arch = None
library = self.raw_data.model.__module__.split(".")[0]
if hasattr(self.raw_data, "config_params"):
diff --git a/tests/old/test_class_parent.py b/tests/old/test_class_parent.py
deleted file mode 100644
index cbd729d..0000000
--- a/tests/old/test_class_parent.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# # #
-# # #
-
-import pytest
-from mir.inspect.parenting import class_parent # Replace with the actual module name
-
-
-def test_class_parent_diffusers():
- assert class_parent("stable-diffusion", "Diffusers") == ["diffusers", "pipelines", "stable_diffusion"]
-
-
-def test_class_parent_transformers():
- assert class_parent("albert", "Transformers") == ["transformers", "models", "albert"]
-
-
-def test_class_parent_invalid_parent():
- with pytest.raises(KeyError):
- class_parent("unknown", "Unknown")
-
-
-def test_class_parent_empty_parent():
- with pytest.raises(KeyError):
- assert class_parent("", "") == ["", "", ""]
-
-
-def test_class_parent_bad_code_name():
- assert class_parent("diffdusers", "diffusers") is None
-
-
-def test_class_parent_mixed_case():
- assert class_parent("sana", "DIFFusERS") == ["diffusers", "pipelines", "sana"]
-
-
-if __name__ == "__main__":
- pytest.main(["-vv", __file__])
diff --git a/tests/old/test_deconstructors_root.py b/tests/old/test_deconstructors_root.py
deleted file mode 100644
index 67a0bed..0000000
--- a/tests/old/test_deconstructors_root.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# # #
-# # #
-
-import pytest
-from mir.config.constants import extract_init_parameters
-
-
-def test_root_class_with_builtin_types():
- class DummyInitModule:
- def __init__(self):
- pass
-
- expected_output = {}
-
- result = extract_init_parameters(DummyInitModule)
- assert result == expected_output
-
-
-if __name__ == "__main__":
- import pytest
-
- pytest.main(["-vv", __file__])
diff --git a/tests/old/test_doc_parser.py b/tests/old/test_doc_parser.py
deleted file mode 100644
index 3178d41..0000000
--- a/tests/old/test_doc_parser.py
+++ /dev/null
@@ -1,143 +0,0 @@
-import unittest
-from mir.doc_parser import parse_docs
-
-
-class TestDocParser(unittest.TestCase):
- def test_parse_simple_case(self):
- doc_string = """
- >>> pipe = MyPipeline.from_pretrained("model/repo")
- """
- result = parse_docs(doc_string)
- self.assertEqual(result.pipe_class, "MyPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "model/repo") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_with_variable_resolution(self):
- doc_string = """
- model_id = "custom/model"
- >>> pipe = MyPipeline.from_pretrained(model_id)
- """
- result = parse_docs(doc_string)
- self.assertEqual(result.pipe_class, "MyPipeline")
- self.assertEqual(result.pipe_repo, "custom/model")
-
- def test_parse_staged_case(self):
- doc_string = """
- >>> pipe = MyPipeline.from_pretrained("model/repo")
- >>> prior_pipe = PriorPipeline.from_pretrain("prior/repo")
- """
- result = parse_docs(doc_string)
- self.assertEqual(result.pipe_class, "MyPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "model/repo") # repo_path
- self.assertEqual(result.staged_class, "PriorPipeline") # staged_class
- self.assertEqual(result.staged_repo, "prior/repo") # staged_repo
-
- def test_parse_no_match(self):
- doc_string = """
- >>> something_else = SomeClass.do_something()
- """
- result = parse_docs(doc_string)
- self.assertIsNone(result) # pipe_class
-
- def test_parse_multiline_doc(self):
- doc_string = """
- # model_id_or_path = "another/repo"
- >>> pipe_prior = PriorPipeline.from_pretrain(model_id_or_path)
- >>> pipeline = MyPipeline.from_pretrained("repo/path")
- """
- result = parse_docs(doc_string)
- self.assertEqual(result.pipe_class, "MyPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "repo/path") # repo_path
- self.assertEqual(result.staged_class, "PriorPipeline") # staged_class
- self.assertEqual(result.staged_repo, "another/repo") # staged_repo
-
- def test_parse_blip(self):
- from diffusers.pipelines.blip_diffusion.pipeline_blip_diffusion import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "BlipDiffusionPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "Salesforce/blipdiffusion") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_pia(self):
- from diffusers.pipelines.pia.pipeline_pia import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "PIAPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "openmmlab/PIA-condition-adapter") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_animatediff_xl(self):
- from diffusers.pipelines.animatediff.pipeline_animatediff_sdxl import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "AnimateDiffSDXLPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "a-r-r-o-w/animatediff-motion-adapter-sdxl-beta") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_animatediff_controlnet(self):
- from diffusers.pipelines.animatediff.pipeline_animatediff_controlnet import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- # TODO : This ought to return control net data but its missing in the docstring
-
- # self.assertEqual(result.pipe_class, "ControlNetModel") # pipe_class
- # self.assertEqual(result.pipe_repo, "lllyasviel/ControlNet-v1-1") # repo_path
- # self.assertIsNone(result.staged_class) # staged_class
- # self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_consistency(self):
- from diffusers.pipelines.consistency_models.pipeline_consistency_models import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "ConsistencyModelPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "openai/diffusers-cd_imagenet64_l2") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_pixart_sigma(self):
- from diffusers.pipelines.pixart_alpha.pipeline_pixart_sigma import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "PixArtSigmaPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS") # repo_path
- self.assertIsNone(result.staged_class) # staged_class
- self.assertIsNone(result.staged_repo) # staged_repo
-
- def test_parse_cascade(self):
- from diffusers.pipelines.stable_cascade.pipeline_stable_cascade import EXAMPLE_DOC_STRING
-
- result = parse_docs(EXAMPLE_DOC_STRING)
- self.assertEqual(result.pipe_class, "StableCascadePriorPipeline") # pipe_class
- self.assertEqual(result.pipe_repo, "stabilityai/stable-cascade-prior") # repo_path
- self.assertEqual(result.staged_class, "StableCascadeDecoderPipeline") # staged_class
- self.assertEqual(result.staged_repo, "stabilityai/stable-cascade") # staged_repo
-
- def test_parse_xl(self):
- from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl import EXAMPLE_DOC_STRING
- from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_inpaint import EXAMPLE_DOC_STRING as EXAMPLE_DOC_STRING_INPAINT
-
- doc_strings = [
- EXAMPLE_DOC_STRING,
- EXAMPLE_DOC_STRING_INPAINT,
- ]
- result = []
- for doc in doc_strings:
- result.append(parse_docs(doc))
-
- self.assertEqual(result[0].pipe_class, "StableDiffusionXLPipeline") # pipe_class
- self.assertEqual(result[0].pipe_repo, "stabilityai/stable-diffusion-xl-base-1.0") # repo_path
- self.assertIsNone(result[0].staged_class) # staged_class
- self.assertIsNone(result[0].staged_repo) # staged_repo
- self.assertEqual(result[1].pipe_class, "StableDiffusionXLInpaintPipeline") # pipe_class
- self.assertEqual(result[1].pipe_repo, "stabilityai/stable-diffusion-xl-base-1.0") # repo_path
- self.assertIsNone(result[1].staged_class) # staged_class
- self.assertIsNone(result[1].staged_repo) # staged_repo
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/old/test_find_docstring_run.py b/tests/old/test_find_docstring_run.py
deleted file mode 100644
index 952c5a5..0000000
--- a/tests/old/test_find_docstring_run.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from mir.inspect.metadata import find_diffusers_docstrings
-from pprint import pprint
-
-find_diffusers_docstrings()
-list(find_diffusers_docstrings())
diff --git a/tests/old/test_gather_diffusers_metadata.py b/tests/old/test_gather_diffusers_metadata.py
deleted file mode 100644
index e628720..0000000
--- a/tests/old/test_gather_diffusers_metadata.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-import pytest
-from unittest.mock import Mock
-
-
-@pytest.fixture
-def mock_import_module(mocker):
- """Fixture to mock import_module and simulate different module scenarios."""
- return mocker.patch("mir.config.conversion.import_submodules")
-
-
-@pytest.fixture
-def mock_pkgutil_iter_modules(mocker):
- """Fixture to mock pkgutil.iter_modules for controlled testing."""
-
- return mocker.patch(
- "pkgutil.iter_modules",
- return_value=[
- (Mock(), "allegro", True),
- (Mock(), "amused", True),
- (Mock(), "animatediff", True),
- (Mock(), "audioldm", True),
- (Mock(), "cogvideo", True),
- (Mock(), "deepfloyd_if", True),
- ],
- )
-
-
-def test_list_diffusers_models():
- from mir.inspect.metadata import find_diffusers_docstrings
-
- find_diffusers_docstrings()
-
-
-def test_find_docstrings_excluded(mock_import_module, mock_pkgutil_iter_modules):
- """Test that excluded modules are not processed."""
- from mir.inspect.metadata import find_diffusers_docstrings
-
- excluded_modules = ["ddpm"]
-
- def side_effect(import_name, *args, **kwargs):
- if any(exc in import_name for exc in excluded_modules):
- raise ImportError(f"Module {import_name} is excluded.")
- return Mock()
-
- mock_import_module.side_effect = side_effect
- results = list(find_diffusers_docstrings()) # type: ignore # noqa
- assert not any("ddpm" in call_arg[0][0] for call_arg in mock_import_module.call_args_list)
diff --git a/tests/old/test_json_io.py b/tests/old/test_json_io.py
deleted file mode 100644
index cc68cb8..0000000
--- a/tests/old/test_json_io.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-import os
-import unittest
-from tempfile import TemporaryDirectory
-from mir.config.json_io import write_json_file, read_json_file
-
-
-class TestFileOperations(unittest.TestCase):
- def setUp(self):
- """Create a temporary directory to store the test files"""
- self.temp_dir = TemporaryDirectory()
- self.file_name = "test_data.json"
- self.file_path = os.path.join(self.temp_dir.name, self.file_name)
- self.test_data = {
- "key1": "value1",
- "key2": 69, # nice
- "key3": [1, 2, 3],
- }
-
- def test_write_and_read_json_file(self):
- """Write data to a JSON file, Read data back from the JSON file,Assert that the written and read data are the same"""
- write_json_file(self.temp_dir.name, self.file_name, self.test_data)
- read_data = read_json_file(self.file_path)
- self.assertEqual(read_data, self.test_data)
-
- def test_read_nonexistent_file(self):
- """Test reading a non-existent file should raise FileNotFoundError"""
- with self.assertRaises(FileNotFoundError):
- read_json_file("non_existent_file.json")
-
- def tearDown(self):
- """Clean up the temporary directory"""
- self.temp_dir.cleanup()
-
-
-if __name__ == "__main__":
- import pytest
-
- pytest.main(["-vv", __file__])
diff --git a/tests/old/test_mir_db_create_restore.py b/tests/old/test_mir_db_create_restore.py
deleted file mode 100644
index b927cb0..0000000
--- a/tests/old/test_mir_db_create_restore.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# # SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-# #
-
-# import os
-# from pathlib import Path
-# from mir.config.constants import MIR_PATH_NAMED
-
-
-# def test_mir_creation():
-# from mir.spec import mir_entry
-# from pprint import pprint
-
-# os.remove(MIR_PATH_NAMED)
-# Path().touch()
-
-# entry = mir_entry(
-# domain="info",
-# arch="unet",
-# series="stable-diffusion-xl",
-# comp="base",
-# repo="stabilityai/stable-diffusion-xl",
-# pkg={
-# 0: {
-# "diffusers": "class_name",
-# "generation": {"num_inference_steps": 40, "denoising_end": 0.8, "output_type": "latent", "safety_checker": False},
-# }
-# },
-# )
-# entry.update(
-# mir_entry(
-# domain="model",
-# arch="unet",
-# series="stable-diffusion-xl",
-# comp="base",
-# file_path="/Users/nyan/Documents/models",
-# ),
-# )
-# entry.update(
-# mir_entry(
-# domain="ops",
-# arch="scheduler",
-# series="align-your-steps",
-# comp="stable-diffusion-xl",
-# pkg={
-# 0: {
-# "diffusers.schedulers.scheduling_utils": {
-# "AysSchedules": {"num_inference_steps": 10, "timesteps": "StableDiffusionXLTimesteps"},
-# }
-# }
-# },
-# )
-# )
-# entry.update(
-# mir_entry(
-# domain="ops",
-# arch="patch",
-# series="hidiffusion",
-# comp="stable-diffusion-xl",
-# pkg={0: {"hidiffusion": {"apply_hidiffusion": {"generation": {"height": 2048, "width": 2048, "eta": 1.0, "guidance_scale": 7.5}}}}},
-# )
-# )
-# pprint(entry)
-
-
-# def test_mir_maid():
-# import json
-# import os
-# from mir.spec.mir import mir_entry
-
-# entry = mir_entry(
-# domain="info",
-# arch="unet",
-# series="stable-diffusion-xl",
-# comp="base",
-# repo="stabilityai/stable-diffusion-xl",
-# pkg={
-# 0: {
-# "diffusers": "class_name",
-# "generation": {"num_inference_steps": 40, "denoising_end": 0.8, "output_type": "latent", "safety_checker": False},
-# }
-# },
-# )
-# try:
-# os.remove(MIR_PATH_NAMED)
-# except FileNotFoundError:
-# pass
-# with open(MIR_PATH_NAMED, "x", encoding="UTF-8") as f:
-# f.write("{}")
-# folder_path_named = os.path.dirname(MIR_PATH_NAMED)
-# from mir.maid import MIRDatabase
-
-# mir_db = MIRDatabase()
-# mir_db.add(entry)
-# mir_db.write_to_disk()
-# print(mir_db.database)
-# with open(MIR_PATH_NAMED, "r", encoding="UTF-8") as f:
-# result = json.load(f)
-# expected = {
-# "info.unet.stable-diffusion-xl": {
-# "base": {
-# "pkg": {
-# "0": {
-# "diffusers": "class_name",
-# "generation": {
-# "denoising_end": 0.8,
-# "num_inference_steps": 40,
-# "output_type": "latent",
-# "safety_checker": False,
-# },
-# },
-# },
-# "repo": "stabilityai/stable-diffusion-xl",
-# },
-# },
-# }
-
-# assert mir_db.database == expected
-# assert result == expected
-
-
-# def test_restore_mir():
-# import json
-# import os
-
-# from mir.config.json_io import write_json_file
-# from mir.config.constants import MIR_PATH_NAMED
-# from mir.maid import MIRDatabase, main
-
-# database = {"expecting": "data"}
-# try:
-# os.remove(MIR_PATH_NAMED)
-# except FileNotFoundError:
-# pass
-# folder_path_named = os.path.dirname(MIR_PATH_NAMED)
-# write_json_file(folder_path_named, file_name="mir.json", data=database, mode="w")
-# database.pop("expecting", {})
-# mir_db = MIRDatabase()
-# mir_db.database.pop("empty", {})
-# main(mir_db)
-# with open(MIR_PATH_NAMED, "r", encoding="UTF-8") as f:
-# result = json.load(f)
-# mir_db = MIRDatabase()
-# expected = mir_db.database
-# for tag, compatibility in result.items():
-# for comp, field in compatibility.items():
-# for header, definition in field.items():
-# if isinstance(definition, dict):
-# for key in definition:
-# if len(key) > 1:
-# assert field[header][key] == expected[tag][comp][header][key]
-# # else:
-# # assert field[header][key] == expected[tag][comp][header][key]
-# else:
-# assert field[header] == expected[tag][comp][header]
-
-# print(mir_db.database)
-
-
-# if __name__ == "__main__":
-# test_mir_creation()
diff --git a/tests/old/test_mir_merge.py b/tests/old/test_mir_merge.py
deleted file mode 100644
index 3d14ac9..0000000
--- a/tests/old/test_mir_merge.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-# test_merge_data.py
-import pytest
-
-from mir.automata import assimilate
-
-
-class MIRDatabase:
- def __init__(self):
- self.database = {
- "info.unet.stable-diffusion-xl": {
- "base": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {0: {"diffusers": "StableDiffusionXLPipeline"}},
- "layer_256": ["62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef"],
- },
- }
- }
-
-
-def test_merge_data_simple_case():
- mir_db = MIRDatabase()
- mir_db.database["arch1.series1"] = {"component1": {}}
-
- data_tuple = [("arch1", "series1", {"component1": {"field1": {"key1": "value1"}}})]
-
- assimilate(mir_db, data_tuple)
- assert mir_db.database["arch1.series1"]["component1"]["field1"]["key1"] == "value1"
-
-
-# Test case
-@pytest.fixture
-def mock_mir_db():
- return MIRDatabase()
-
-
-def test_merge_data(mock_mir_db):
- """TEST DATAAAAA 測試資料
- Call the function to test & Check if the data was merged correctly"""
- from pprint import pprint
-
- data_tuple = [
- (
- "info.unet",
- "stable-diffusion-xl",
- {
- "base": {
- "pkg": {
- 0: {
- "generation": {
- "denoising_end": 0.8,
- "output_type": "latent",
- "safety_checker": False,
- "width": 1024,
- "height": 1024,
- },
- },
- 1: {"diffusers": "DiffusionPipeline"},
- },
- "layer_256": ["62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef"],
- }
- },
- ),
- ]
-
- assimilate(mock_mir_db, data_tuple)
- expected_result = {
- "base": {
- "repo": "stabilityai/stable-diffusion-xl-base-1.0",
- "pkg": {
- 0: {
- "diffusers": "StableDiffusionXLPipeline",
- "generation": {
- "denoising_end": 0.8,
- "output_type": "latent",
- "safety_checker": False,
- "width": 1024,
- "height": 1024,
- },
- },
- 1: {"diffusers": "DiffusionPipeline"},
- },
- "layer_256": ["62a5ab1b5fdfa4fedb32323841298c6effe1af25be94a8583350b0a7641503ef"],
- }
- }
- pprint(mock_mir_db.database)
- assert mock_mir_db.database["info.unet.stable-diffusion-xl"] == expected_result
-
-
-def test_merge_data_nested_case():
- mir_db = MIRDatabase()
- mir_db.database = {"arch2.series2": {"base": {"pkg": {0: {"module": {}}}}}}
- print(mir_db.database)
- assert mir_db.database["arch2.series2"]["base"]["pkg"][0] == {"module": {}}
- data_tuple = [("arch2", "series2", {"base": {"pkg": {0: {"extra": {"x": {"key2": "value2"}}}}}})]
- assimilate(mir_db, data_tuple)
- print(mir_db.database)
-
- assert mir_db.database["arch2.series2"]["base"]["pkg"][0]["module"] == {}
- assert mir_db.database["arch2.series2"]["base"]["pkg"][0]["extra"] == {"x": {"key2": "value2"}}
-
-
-def test_merge_data_multiple_levels():
- mir_db = MIRDatabase()
- mir_db.database["arch3.series3"] = {"component3": {"field3": {"definition3": {"sub_def3": {}}}}}
-
- data_tuple = [("arch3", "series3", {"component3": {"field3": {"definition3": {"sub_def3": {"key3": "value3"}}}}})]
-
- assimilate(mir_db, data_tuple)
- assert mir_db.database["arch3.series3"]["component3"]["field3"]["definition3"]["sub_def3"]["key3"] == "value3"
-
-
-def test_merge_data_type_error():
- mir_db = MIRDatabase()
- mir_db.database["arch4.series4"] = {"component4": {}}
-
- data_tuple = [("arch4", "series4", {"component4": "not a dict"})]
-
- with pytest.raises(TypeError):
- assimilate(mir_db, data_tuple)
diff --git a/tests/old/test_mir_search.py b/tests/old/test_mir_search.py
deleted file mode 100644
index 6bfd64c..0000000
--- a/tests/old/test_mir_search.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-import pytest
-
-
-@pytest.fixture
-def mock_test_database():
- from mir.maid import MIRDatabase # , main
-
- mir_db = MIRDatabase()
- # main(mir_db)
- return mir_db
-
-
-def test_grade_maybes_fail(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="table-cascade")
- assert result is None
-
-
-def test_grade_similar_fail_again(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="able-cascade-")
- assert result is None
-
-
-def test_grade_cascade_decoder_match(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="stabilityai/stable-cascade")
- assert result == ["info.unet.stable-cascade", "decoder"]
-
-
-def test_grade_cascade_match(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="stabilityai/stable-cascade", domain="info.unet")
- assert result == ["info.unet.stable-cascade", "decoder"]
-
-
-def test_grade_field_change(mock_test_database):
- result = mock_test_database.find_tag(field="pkg", target="parler_tts", domain="info.")
- assert result == ["info.art.parler-tts-v1", "*"]
-
-
-def test_grade_letter_case_change(mock_test_database):
- result = mock_test_database.find_tag(field="pkg", target="AuDiOCrAfT")
- assert result == ["info.art.audiogen", "*"]
-
-
-def test_repo_case_change(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="outeAI/OuteTTS-0.3-1b")
- assert result == ["info.art.outetts-0", "*"]
-
-
-def test_sub_module_detection(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="PixArt-alpha/PixArt-Sigma-XL-2-1024-Ms")
- assert result == ["info.dit.pixart-sigma-xl-2-1024-ms", "*"]
-
-
-def test_find_tag_truncated(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="UsefulSenso")
- assert result is None
-
-
-def test_find_tag_truncated_2(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="UsefulSensors")
- assert result is None
-
-
-def test_find_tag_truncated_4(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="UsefulSensors/moon")
- assert result is None
-
-
-def test_find_tag_decent(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="UsefulSensors/moonshine")
- assert result == ["info.stst.moonshine", "*"]
-
-
-def test_find_tag_truncated_6(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="UsefulSensors/moonshine-")
- assert result == ["info.stst.moonshine", "*"]
-
-
-def test_find_qwen_2_vl(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="Qwen/Qwen2-VL-7B-Instruct", domain="info.vit")
- assert result == ["info.vit.qwen2-vl", "*"]
-
-
-def test_find_qwen_2_vl_2(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="Qwen/Qwen2-VL-Instruct".lower(), domain="info.vit")
- assert result == ["info.vit.qwen2-vl", "*"]
-
-
-def test_grade_similar_fail_umt5(mock_test_database):
- result = mock_test_database.find_tag(field="task", target="UMT5EncoderModel")
- assert result is None
-
-
-def test_find_gpt_oss(mock_test_database):
- result = mock_test_database.find_tag(field="repo", target="openai/gpt-oss-120b".lower(), domain="info.moe")
- assert result == ["info.moe.gpt-oss", "*"]
diff --git a/tests/old/test_mir_tagging.py b/tests/old/test_mir_tagging.py
deleted file mode 100644
index 272f157..0000000
--- a/tests/old/test_mir_tagging.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-from mir.tag import tag_model_from_repo
-
-
-# def test_param_no_delimiter_version():BAH
-# result = make_mir_tag("xyz1b")
-# assert result == ("xyz", "*")
-# print(result)
-
-
-def test_split_hyphenated():
- result = tag_model_from_repo("xyz-15b")
- assert result == ("xyz", "*")
- print(result)
-
-
-# def test_split_dot(): BAH
-# result = make_mir_tag("xyz.15b")
-# assert result == ("xyz", "*")
-
-
-def test_split_dot_version():
- assert tag_model_from_repo("xyz1.0") == ("xyz1", "*")
-
-
-def test_split_hyphen_version():
- assert tag_model_from_repo("xyz1-0") == ("xyz1-0", "*")
-
-
-def test_split_hyphen_v_version():
- assert tag_model_from_repo("xyzv1-0") == ("xyzv1-0", "*")
-
-
-def test_no_split():
- assert tag_model_from_repo("flux.1-dev") == ("flux1-dev", "*")
-
-
-def test_no_split_again():
- assert tag_model_from_repo("blipdiffusion") == ("blipdiffusion", "*")
-
-
-def test_no_version_dot_numeric_and_diffusers():
- assert tag_model_from_repo("EasyAnimateV5.1-7b-zh-diffusers") == ("easyanimatev5-zh", "diffusers")
diff --git a/tests/old/test_regex_constants.py b/tests/old/test_regex_constants.py
deleted file mode 100644
index 70820a8..0000000
--- a/tests/old/test_regex_constants.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from mir.config.constants import PARAMETERS_SUFFIX
-from mir.tag import tag_model_from_repo
-
-
-def test_constants():
- import re
-
- data_tests = {
- "mlx-community/Kokoro-82M-4bit": ["kokoro", "*"],
- "RuadaptQwen2.5-32B-Pro-Beta:latest": ["ruadaptqwen2", "*"],
- "microsoft/Phi-4-mini-instruct": ["phi-4", "*"],
- "tiiuae/falcon-mamba-7b": ["falcon-mamba", "*"],
- "ijepa-vith14-1k": ["ijepa-vith14", "*"],
- "arcee-ai/AFM-4.5B": ["afm", "*"],
- "ibm-research/PowerMoE-3b": ["powermoe", "*"],
- "qwen1-5-moe-a2-7b": ["qwen1-5-moe-a2", "*"],
- "Efficient-Large-Model/Sana_Sprint_1.6B_1024px_diffusers": ["sana-sprint-1024px", "diffusers"],
- "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers": ["hunyuandit-v1", "diffusers"],
- "parler-tts/parler-tts-large-v1": ["parler-tts-v1", "*"],
- }
- # regex = PARAMETERS_SUFFIX
- for test, expected in data_tests.items():
- mir_tag = list(tag_model_from_repo(test))
- assert mir_tag == expected
diff --git a/tests/old/test_resolve_code_names.py b/tests/old/test_resolve_code_names.py
deleted file mode 100644
index fa875a1..0000000
--- a/tests/old/test_resolve_code_names.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# # #
-# # #
-
-import pytest
-from mir.inspect.classes import resolve_code_names
-
-
-def test_diffusers_name():
- assert resolve_code_names("StableDiffusionPipeline", "diffusers") == "stable-diffusion"
-
-
-def test_transformers_name():
- assert resolve_code_names("BertModel", "transformers") == "bert"
-
-
-def test_no_class():
- result = resolve_code_names()
- assert isinstance(result, list) is True
- assert len(result) > 300
-
-
-def test_invalid_package():
- with pytest.raises(KeyError):
- assert resolve_code_names("EBertModel", "invalid_package") == ""
-
-
-def test_mixed_search():
- assert resolve_code_names("EBertModel", "transformers") == ""
-
-
-def test_difficult_search():
- assert resolve_code_names("AllegroPipeline", "diffusers") == "allegro"
-
-
-def test_diff_folder_search():
- assert resolve_code_names("AllegroPipeline", "diffusers", path_format=True) == ["diffusers", "pipelines", "allegro"]
-
-
-def test_tf_folder_search():
- assert resolve_code_names("Wav2Vec2Model", "transformers", path_format=True) == ["transformers", "models", "wav2vec2"]
-
-
-if __name__ == "__main__":
- pytest.main(["-vv", __file__])
diff --git a/tests/old/test_seek_class.py b/tests/old/test_seek_class.py
deleted file mode 100644
index 4d3a1de..0000000
--- a/tests/old/test_seek_class.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from mir.config.conversion import import_submodules
-from mir.inspect.pipes import get_class_parent_folder
-
-
-def test_seek_diffusers_path():
- assert get_class_parent_folder(import_submodules("AllegroPipeline", "diffusers"), "diffusers") == ["diffusers", "pipelines", "allegro"]
-
-
-def test_seek_transformers_path():
- module = import_submodules("AlbertModel", "transformers")
- assert get_class_parent_folder(module, "transformers") == ["transformers", "models", "albert"]
-
-
-def test_seek_class_attention():
- assert get_class_parent_folder("CogVideoXAttnProcessor2_0", "diffusers") is None
diff --git a/tests/old/test_task.py b/tests/old/test_task.py
deleted file mode 100644
index 2c527b9..0000000
--- a/tests/old/test_task.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-# from mir.__main__ import main
-# from mir.maid import MIRDatabase
-
-
-# def test_task_and_pipe():
-# mir_db = MIRDatabase()
-# assert main(mir_db) is not None
diff --git a/tests/old/test_taskanalyzer.py b/tests/old/test_taskanalyzer.py
deleted file mode 100644
index 77adb96..0000000
--- a/tests/old/test_taskanalyzer.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-import types
-from typing import OrderedDict
-import pytest
-import pytest_asyncio
-import sys
-
-from mir.inspect.tasks import TaskAnalyzer
-
-
-def test_show_transformers_tasks_by_code_name():
- """Test that show_transformers_tasks returns a list of class names when code_name is provided."""
- tasks = TaskAnalyzer.show_transformers_tasks(code_name="bert")
-
- # Should return a list (not a type object)
- assert isinstance(tasks, list), f"Expected list, got {tasks} type {type(tasks)}"
-
- # Should contain string class names
- if tasks:
- assert all(isinstance(task, str) for task in tasks), f"Expected list of strings, got {tasks} type {type(tasks)}"
- print(f"show_transformers_tasks('bert') returned: {tasks}")
-
-
-class DummyDiffusersTaskMap(OrderedDict):
- """Mimic a SUPPORTED_TASKS_MAPPINGS entry."""
-
- pass
-
-
-def make_dummy_diffusers_modules(monkeypatch):
- """Create minimal diffusers package structure required by AutoPkg.
- ie diffusers.pipelines.auto_pipeline"""
- auto_pipeline = types.SimpleNamespace()
- task_map_norm = DummyDiffusersTaskMap()
- task_map_i2i = DummyDiffusersTaskMap()
-
- #
- class CoronaPipeline:
- """Fake model code mapped to fake pipe class"""
-
- __name__ = "CoronaPipeline"
-
- class CoronaImg2ImgPipeline:
- __name__ = "CoronaImg2ImgPipeline"
-
- task_map_norm["corona-model"] = CoronaPipeline
- task_map_i2i["corona-model"] = CoronaImg2ImgPipeline
- auto_pipeline.SUPPORTED_TASKS_MAPPINGS = [
- task_map_norm,
- task_map_i2i,
- ]
-
- def _get_task_class(task_map, class_name, _):
- """Return a dummy class if class_name matches"""
-
- return task_map.get("corona-model")
- # return None
-
- auto_pipeline._get_task_class = _get_task_class
- monkeypatch.setitem(sys.modules, "diffusers.pipelines.auto_pipeline", auto_pipeline)
-
-
-def make_dummy_transformers_modules(monkeypatch):
- """Create minimal transformers package structure required by AutoPkg."""
- utils_fx = types.SimpleNamespace()
-
- def _generate_supported_model_class_names(code_name):
- """Return a list based on the code_name"""
- return [f"{code_name}_TaskA", f"{code_name}_TaskB"]
-
- utils_fx._generate_supported_model_class_names = _generate_supported_model_class_names
- monkeypatch.setitem(sys.modules, "transformers.utils.fx", utils_fx)
-
- # nnll.metadata.helpers.make_callable stub
- helpers = types.SimpleNamespace()
-
- def make_callable(name, pkg):
- # Return a dummy class with __module__ and __all__
- class Dummy:
- __module__ = f"{pkg}.dummy_module"
-
- Dummy.__all__ = ["DummyClass"]
- return Dummy
-
- helpers.make_callable = make_callable
- monkeypatch.setitem(sys.modules, "nnll.metadata.helpers", helpers)
-
-
-def make_dummy_nnll_modules(monkeypatch):
- """Create minimal nnll package structure required by AutoPkg."""
- # nnll.tensor_pipe.deconstructors.get_code_names
- deconstructors = types.SimpleNamespace()
-
- def get_code_names(class_name, package_name):
- """Return a deterministic code name"""
- return f"{class_name}_code"
-
- deconstructors.get_code_names = get_code_names
- monkeypatch.setitem(sys.modules, "nnll.tensor_pipe.deconstructors", deconstructors)
-
- # nnll.mir.tag.make_scheduler_tag
- mir_tag = types.SimpleNamespace()
-
- def make_scheduler_tag(class_name):
- """Return dummy series and component"""
- return ("scheduler_series", "scheduler_component")
-
- mir_tag.make_scheduler_tag = make_scheduler_tag
- monkeypatch.setitem(sys.modules, "nnll.mir.tag", mir_tag)
-
-
-class DummyMIRDatabase:
- """A very small in‑memory stand‑in for the real MIRDatabase."""
-
- def __init__(self):
- """# DB Structure: {series: {compatibility: {field_name: {"0": pkg:{ : ...}}}}}"""
- self.database = {}
-
- def add_entry(self, series, compatibility, field_name, pkg_tree):
- self.database.setdefault(series, {})
- self.database[series].setdefault(compatibility, {})
- self.database[series][compatibility][field_name] = {"0": pkg_tree}
-
- def find_tag(self, *, field, target, sub_field=None, domain=None):
- """Simplified: return a fake tag if target contains "Known"""
- tree = {
- "IPNDMScheduler": ["ops.scheduler.dummy", "ipndmscheduler"],
- "EQvae": ["info.vae.dummy", "AutoencoderKL"],
- "DummyOther": ["info.dummy.OtherClass", "*"],
- "CLIPTokenizer": [
- "info.encoder.tokenizer",
- "CLIPDummy",
- ],
- }
- return tree.get(target)
-
-
-@pytest.fixture(autouse=True)
-def stub_external_modules(monkeypatch):
- """Patch all external imports used by AutoPkg."""
-
- make_dummy_diffusers_modules(monkeypatch)
- make_dummy_transformers_modules(monkeypatch)
- make_dummy_nnll_modules(monkeypatch)
-
-
-def test_show_diffusers_tasks():
- tasks = TaskAnalyzer.show_diffusers_tasks(
- code_name="corona-model",
- class_name="CoronaModel",
- )
- assert "CoronaPipeline" in tasks
- assert "CoronaImg2ImgPipeline" in tasks
-
-
-# def test_show_transformers_tasks_by_class():
-# """When code_name is None, make_callable returns a dummy with __all__"""
-# tasks = TaskAnalyzer.show_transformers_tasks(class_name="AnyClass")
-# assert tasks == ["DummyClass"] # from Dummy.__all__
-
-
-# def test_show_transformers_tasks_by_code():
-# tasks = TaskAnalyzer.show_transformers_tasks(code_name="bert")
-# assert tasks == ["bert_TaskA", "bert_TaskB"]
-
-
-# @pytest.mark.asyncio
-# async def test_trace_tasks_filters_and_sorts():
-# """Package entry should be processed (not in `skip_auto` list)
-# show_transformers_tasks should return ["DummyClass"]; no snip words, so unchanged"""
-# ap = TaskAnalyzer()
-
-# pkg_tree = {"transformers": "SomeModel"}
-# tasks = await ap.trace_tasks(pkg_tree)
-
-# assert tasks == ["DummyClass"]
-
-
-@pytest.mark.asyncio
-async def test_trace_finds_map_with_code_name():
- ap = TaskAnalyzer()
- pkg_tree = {"diffusers": "CoronaPipeline"}
- tasks = await ap.trace_tasks(pkg_tree)
- assert tasks == [
- "CoronaImg2ImgPipeline",
- "CoronaPipeline",
- ]
-
-
-@pytest.mark.asyncio
-async def test_mflux_path_returns_static_list():
- ap = TaskAnalyzer()
- pkg_tree = {"mflux": "any"}
- tasks = await ap.trace_tasks(pkg_tree)
- assert tasks == ap.mflux_tasks
-
-
-@pytest.mark.asyncio
-async def test_skip_automode_return_none():
- ap = TaskAnalyzer()
- pkg_tree = {"transformers": "AutoModel"}
- tasks = await ap.trace_tasks(pkg_tree)
- assert tasks is None
-
-
-@pytest.mark.asyncio
-async def test_hyperlink_and_tag_class():
- """Populate a known tag for a scheduler class\n"""
- ap = TaskAnalyzer()
- mir_db = DummyMIRDatabase()
-
- mir_db.add_entry(
- series="ops.scheduler.scheduler_series",
- compatibility="any",
- field_name="pkg",
- pkg_tree={"diffusers": "IPNDMScheduler"},
- )
-
- class IPNDMScheduler:
- __name__ = "IPNDM"
- __module__ = "schedulers.ipndm.IPNDMScheduler"
-
- class EQvae:
- __name__ = "EQ-VAE"
- __module__ = "autoencoders.AutoencoderKL"
-
- class DummyOther:
- __name__ = "OtherClass"
- __module__ = "other_pkg.OtherClass"
-
- class CLIPTokenizer:
- __name__ = "CLIPTokenizer"
- __module__ = "tokenizers.CLIPTokenizer"
-
- pipe_args = {
- "scheduler": IPNDMScheduler,
- "vae": EQvae,
- "unrelated": DummyOther,
- "tokenizer": CLIPTokenizer, # should be mapped to encoder tokenizers
- }
-
- links = await ap.hyperlink_to_mir(pipe_args, "info.test_series", mir_db)
-
- assert "scheduler" in links["pipe_names"] # Scheduler should be resolved via make_scheduler_tag -> find_tag fallback\n
- scheduler_tag = links["pipe_names"]["scheduler"]
- assert scheduler_tag == ["ops.scheduler.dummy", "ipndmscheduler"]
-
- assert "vae" in links["pipe_names"] # VAE should be resolved via find_tag (since not in dummy DB)
- assert links["pipe_names"]["vae"] == ["info.vae.dummy", "AutoencoderKL"]
-
- assert links["pipe_names"]["unrelated"] == ["info.dummy.OtherClass", "*"] # Unrelated should just return the class name
-
- assert links["pipe_names"]["tokenizer"] == ["info.encoder.tokenizer", "test_series"] # Tokenizer role is *special‑cased*
-
-
-@pytest.mark.asyncio
-async def test_detect_tasks_and_pipes():
- ap = TaskAnalyzer()
- mir_db = DummyMIRDatabase()
-
- mir_db.add_entry(
- series="info.art.modelA", # Add a series that passes the skip filters
- compatibility="compat1",
- field_name="pkg",
- pkg_tree={"transformers": "SomeModel"},
- )
-
- mir_db.add_entry(
- series="info.lora.modelB", # Add a series (".lora") that should be ignored (skip_series)
- compatibility="compat2",
- field_name="pkg",
- pkg_tree={"transformers": "SomeModel"},
- )
-
- async def fake_trace_tasks(pkg_tree):
- """Patch trace_tasks to return a predictable list"""
- return ["TaskX", "TaskY"]
-
- ap.trace_tasks = fake_trace_tasks
-
- tasks = await ap.detect_tasks(mir_db)
- print(tasks)
- assert any("modelA" in series for prefix, series, _ in tasks)
- assert not any("lora" in prefix for prefix, series, _ in tasks)
-
- class DummyPipe:
- """diffusers entry with a pipe class for detect_pipes"""
-
- def __init__(arg1: int, arg2: str):
- """Exists purely for annotation reading!"""
- pass
-
- def fake_make_callable(name, pkg):
- """Stub make_callable to return DummyPipe for the module name"""
- return DummyPipe
-
- # Monkeypatch the helper used inside detect_pipes
- from mir.config.conversion import import_submodules
-
- import_submodules = fake_make_callable # type: ignore
-
- mir_db.add_entry(
- series="info.vit.modelC",
- compatibility="compat3",
- field_name="pkg",
- pkg_tree={"diffusers": "DummyPipe"},
- )
-
- async def fake_hyperlink(pipe_args, series, db):
- """Patch hyperlink_to_mir to return a simple marker"""
- return {"pipe_names": {"dummy": ["OK"]}}
-
- ap.hyperlink_to_mir = fake_hyperlink
-
- pipes = await ap.detect_pipes(mir_db) # Should contain the non‑skipped diffusers entry
- assert any("modelC" in series for prefix, series, _ in pipes)
- for _, _, data in pipes: # Ensure the returned structure matches the fake hyperlink output
- assert data["compat3"]["pipe_names"]["dummy"] == ["OK"]
diff --git a/tests/test_harvest_transformers.py b/tests/test_harvest_transformers.py
new file mode 100644
index 0000000..1d86502
--- /dev/null
+++ b/tests/test_harvest_transformers.py
@@ -0,0 +1,6 @@
+from mir.generate.transformers.harvest import HarvestLoop
+
+
+def test_harvest():
+ harvest_classes = HarvestLoop()
+ harvest_classes()
diff --git a/tests/test_inspect.py b/tests/test_inspect.py
new file mode 100644
index 0000000..57e4689
--- /dev/null
+++ b/tests/test_inspect.py
@@ -0,0 +1,7 @@
+from diffusers import CosmosTransformer3DModel
+
+
+model = CosmosTransformer3DModel()
+print(type(model.transformer_blocks[0]))
+for i in model.transformer_blocks[0]:
+ print(type(i))
diff --git a/tests/test_mir_generate_diffusers.py b/tests/test_mir_generate_diffusers.py
index 2db66d0..4cbfb8f 100644
--- a/tests/test_mir_generate_diffusers.py
+++ b/tests/test_mir_generate_diffusers.py
@@ -1,6 +1,6 @@
def test_info_key_exists_and_library_is_not_nested():
- from mir.generate.diffusers.harvest import HarvestClasses
+ from mir.generate.diffusers.harvest import HarvestLoop
- Mir = HarvestClasses().db.db
+ Mir = HarvestLoop().db.db
# print(Mir)
diff --git a/tests/test_mir_generate_transformers.py b/tests/test_mir_generate_transformers.py
index 2fd0a11..47bdb13 100644
--- a/tests/test_mir_generate_transformers.py
+++ b/tests/test_mir_generate_transformers.py
@@ -1,7 +1,7 @@
def test_info_key_exists_and_library_is_not_nested():
- from mir.generate.transformers.harvest import HarvestClasses
+ from mir.generate.transformers.harvest import HarvestLoop
- Mir = HarvestClasses().db.db
+ Mir = HarvestLoop().db.db
print(Mir.info.cnn.yolos)
result = Mir.info.cnn.yolos["transformers"] # should not throw
@@ -9,9 +9,9 @@ def test_info_key_exists_and_library_is_not_nested():
def test_ops_key_exists_and_library_is_not_tested():
- from mir.generate.transformers.harvest import HarvestClasses
+ from mir.generate.transformers.harvest import HarvestLoop
- Mir = HarvestClasses().db.db
+ Mir = HarvestLoop().db.db
print(Mir.ops.cnn.yolos)
result = Mir.ops.cnn.yolos["transformers"] # should not throw
@@ -26,9 +26,9 @@ def test_ops_key_exists_and_library_is_not_tested():
def test_ops_tokenizer_created():
- from mir.generate.transformers.harvest import HarvestClasses
+ from mir.generate.transformers.harvest import HarvestLoop
- Mir = HarvestClasses().db.db
+ Mir = HarvestLoop().db.db
result = Mir.ops.encoder.tokenizer.zamba2["transformers"]
assert result == {"model": "transformers.models.llama.tokenization_llama.LlamaTokenizer"}
From 193522f6170634f0337ece7741eb5796013112f5 Mon Sep 17 00:00:00 2001
From: exdysa <91800957+exdysa@users.noreply.github.com>
Date: Wed, 21 Jan 2026 20:25:31 -0500
Subject: [PATCH 14/16] ~eeby sleeeby
---
mir/generate/_tasks.py | 6 +-
.../diffusers/{harvest.py => gather.py} | 43 +-
mir/generate/diffusers/package.py | 69 -
mir/generate/diffusers/raw_data.py | 24 -
mir/generate/diffusers/tasks.py | 34 -
mir/generate/from_module.py | 2 +-
mir/generate/test.json | 4549 +++++++++++++++++
mir/generate/transformers/gather.py | 24 +
mir/generate/transformers/harvest.py | 44 -
mir/generate/transformers/package.py | 56 -
mir/generate/transformers/raw_data.py | 24 -
mir/generate/transformers/tasks.py | 32 -
mir/maid.py | 2 +-
mir/model.py | 50 +
mir/nesting.py | 85 +
mir/package.py | 188 +-
mir/tag.py | 53 +-
tests/test_gather_diffusers.py | 10 +
tests/test_gather_transformers.py | 10 +
tests/test_harvest_transformers.py | 6 -
20 files changed, 4873 insertions(+), 438 deletions(-)
rename mir/generate/diffusers/{harvest.py => gather.py} (55%)
delete mode 100644 mir/generate/diffusers/package.py
delete mode 100644 mir/generate/diffusers/raw_data.py
delete mode 100644 mir/generate/diffusers/tasks.py
create mode 100644 mir/generate/test.json
create mode 100644 mir/generate/transformers/gather.py
delete mode 100644 mir/generate/transformers/harvest.py
delete mode 100644 mir/generate/transformers/package.py
delete mode 100644 mir/generate/transformers/raw_data.py
delete mode 100644 mir/generate/transformers/tasks.py
create mode 100644 mir/model.py
create mode 100644 mir/nesting.py
create mode 100644 tests/test_gather_diffusers.py
create mode 100644 tests/test_gather_transformers.py
delete mode 100644 tests/test_harvest_transformers.py
diff --git a/mir/generate/_tasks.py b/mir/generate/_tasks.py
index 32961ae..2745598 100644
--- a/mir/generate/_tasks.py
+++ b/mir/generate/_tasks.py
@@ -3,7 +3,7 @@
from typing import Any, Callable, List
-from mir.generate.diffusers.raw_data import DPrepareData
+from mir.generate.diffusers.raw_data import ModelAttributes
from mir import DBUQ
from mir.tag import MIRTag
@@ -12,11 +12,11 @@
class TaskAnalyzer:
- prepared_data: DPrepareData
+ prepared_data: ModelAttributes
mir_tag: MIRTag
tasks: dict[str, str] | None = None
- def __init__(self, prepared_data: DPrepareData, mir_tag: MIRTag) -> None:
+ def __init__(self, prepared_data: ModelAttributes, mir_tag: MIRTag) -> None:
self.prepared_data = prepared_data
self.mir_tag = mir_tag
self.skip_series = [
diff --git a/mir/generate/diffusers/harvest.py b/mir/generate/diffusers/gather.py
similarity index 55%
rename from mir/generate/diffusers/harvest.py
rename to mir/generate/diffusers/gather.py
index e0a697c..43a68ca 100644
--- a/mir/generate/diffusers/harvest.py
+++ b/mir/generate/diffusers/gather.py
@@ -1,49 +1,36 @@
# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
#
-from importlib import import_module
-from inspect import getmro
from typing import get_type_hints
-from mir.generate.diffusers.raw_data import DPrepareData
-
-class HarvestLoop:
+class GatherLoop:
def __init__(self) -> None:
- """Initializes the HarvestClasses instance with an empty list to store raw class data."""
- from mir.generate.transformers.harvest import HarvestLoop
-
+ """Loops through diffusers packages to harvest class data."""
from mir.maid import MIRDatabase
self.db = MIRDatabase()
- self.harvest_tf = HarvestLoop()
-
- def __call__(self) -> None:
from mir.data import EXCLUSIONS
+ from mir.build_entry import BuildEntry
- prepared_data = {}
- library = "diffusers"
- subclasses = self.extract_subclass_data(library, "DiffusionPipeline") # diffusers.pipelines.
+ build_entries = []
+ subclasses = self.extract_subclass_data("diffusers", "DiffusionPipeline")
for module_path, pipeline in subclasses.items():
if module_path.rsplit(".", 1)[-1] not in EXCLUSIONS["exclusion_list"]:
- loop_parameters = get_type_hints(pipeline.__init__)
- loop_parameters.setdefault("pipeline", pipeline)
- for name, self.model in loop_parameters.items():
- if prepare_data := self.prepare_class_data():
- prepared_data.setdefault(name, prepare_data)
- for data in prepared_data:
- pass
-
- def prepare_class_data(self):
- prepared_data = DPrepareData(model=self.model)
- return prepared_data
+ build_entries.extend([BuildEntry(model_type=model_type, model=model) for model_type, model in get_type_hints(pipeline.__init__).items()])
+ build_entries.append(BuildEntry(model_type="pipeline", model=pipeline))
+ print([x.attributes for x in build_entries])
+ # TODO: for data in prepared_data:
def extract_subclass_data(self, package_name: str, base_class_name: str):
- """Return a dict mapping `.` → class object
- for every class in `package_name` that subclasses a class named
- `base_class_name`."""
+ """Extracts subclasses from a package that inherit from a specified base class.\n
+ :param package_name: Name of the package to search
+ :param base_class_name: Name of the base class to inherit from
+ :return: Dictionary mapping fully qualified class names to class objects"""
from pkgutil import walk_packages
+ from inspect import getmro
+ from importlib import import_module
results = {}
root_pkg = import_module(package_name)
diff --git a/mir/generate/diffusers/package.py b/mir/generate/diffusers/package.py
deleted file mode 100644
index 9c08f39..0000000
--- a/mir/generate/diffusers/package.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-from types import ModuleType
-from typing import Callable
-from dataclasses import dataclass, field
-
-
-@dataclass
-class MIRPackage:
- model_type: str
- model: Callable | str | dict[str, str]
- model_path: ModuleType
- package: dict[str, str] = field(init=False, default_factory=dict[str, str])
-
- def __post_init__(self):
- self.package = {}
- self.model_name: str = self.model.__name__
- self.model_path: ModuleType = self.model.__module__
- if not isinstance(self.data, dict):
- self.generate_package()
- self.generate_repo()
-
- def generate_repo(self):
- from mir.data import MIGRATIONS
-
- if self.model_type in ["unet", "transformer"] and (doc_string := getattr(self.model_path, "EXAMPLE_DOC_STRING", None)):
- if repo := MIGRATIONS["migrated_pipes"].get(self.model_name, False):
- self.repo = repo
- elif self.model_type not in ["scheduler", "vae", "tokenizer"]:
- self.process_doc_string(doc_string=doc_string)
-
- def generate_package(self) -> None:
- """Generates package information for the MIR tag based on class.
- :param pkg: A class object (model, tokenizer, etc) to build a tag from"""
- model = f"{self.model_path}.{self.model_name}"
- self.package: dict[str, str] = {"model": model}
-
- def config_to_repo(self, config_class: Callable) -> str | None:
- """Extracts the repository path from the configuration class documentation.\n
- :param config_class: Configuration class to extract repository path from.
- :return: Repository path as a string if found, otherwise None."""
- import re
-
- from mir import NFO
-
- doc_check = [config_class]
- if hasattr(config_class, "forward"):
- doc_check.append(config_class.forward) # type: ignore
- for pattern in doc_check:
- doc_string = pattern.__doc__
- matches = re.findall(r"\[([^\]]+)\]", doc_string) # type: ignore
- if matches:
- try:
- return next(iter(snip.strip('"').strip() for snip in matches if "/" in snip))
- except StopIteration as error_log:
- NFO(f"ERROR >>{matches} : LOG >> {error_log}")
- continue
-
- def process_doc_string(self, doc_string: str) -> None:
- from mir.generate.diffusers.doc_parse import DocStringParser
-
- doc_parser = DocStringParser(doc_string=doc_string, model=self.model, model_path=self.model_path)
- doc_parser.parse()
- if repo_path := doc_parser.pipe_repo:
- self.repo_path = repo_path
- if staged_repo := doc_parser.staged_repo:
- self.staged_repo = staged_repo
diff --git a/mir/generate/diffusers/raw_data.py b/mir/generate/diffusers/raw_data.py
deleted file mode 100644
index 26170e8..0000000
--- a/mir/generate/diffusers/raw_data.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-
-from dataclasses import dataclass, field
-from typing import Callable
-
-
-@dataclass
-class DPrepareData:
- """Represents a structured entry of the name of the class and its associated attributes."""
-
- model: Callable
- model_params: dict[str, list[str]] = field(init=True, default_factory=lambda: {"": [""]})
-
- model_name: str = field(init=False)
- library: str = field(init=False)
- import_path: str = field(init=False)
-
- def __post_init__(self):
- """Initializes the DPrepareData instance by setting derived attributes."""
- self.model_name: str = self.model.__name__
- self.import_path: str = self.model.__module__.rsplit(".", 1)[0]
- self.library: str = self.import_path.split(".")[0]
diff --git a/mir/generate/diffusers/tasks.py b/mir/generate/diffusers/tasks.py
deleted file mode 100644
index 068b126..0000000
--- a/mir/generate/diffusers/tasks.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# SPDX-License-Identifier: MPL-2.0 AND LicenseRef-Commons-Clause-License-Condition-1.0
-#
-
-from dataclasses import dataclass, field
-from typing import Callable
-
-
-@dataclass
-class CollectTasks:
- model: Callable
- import_path: str
- tasks: list[str] = field(init=False)
-
- def __post_init__(self) -> None:
- self.model_to_tasks()
-
- def model_to_tasks(self) -> None:
- """Return Diffusers task pipes based on package-specific query\n
- :param class_name: To find task pipes from a Diffusers class pipe, defaults to None
- :param code_name: To find task pipes from a Transformers class pipe, defaults to None
- :return: A list of alternate class pipelines derived from the specified class"""
- from mir.generate.diffusers import SUPPORTED_TASKS_MAPPINGS, GET_TASK_CLASS
-
- alt_tasks = set({})
- self.internal_name = self.import_path.rsplit(".", 2)[-1]
- for task_map in SUPPORTED_TASKS_MAPPINGS:
- task_class = GET_TASK_CLASS(task_map, self.model, False)
- if task_class:
- alt_tasks.add(task_class.__name__)
- for model_code, pipe_class_obj in task_map.items():
- if self.internal_name in model_code:
- alt_tasks.add(pipe_class_obj.__name__)
-
- self.tasks = [x for x in alt_tasks]
diff --git a/mir/generate/from_module.py b/mir/generate/from_module.py
index fffb820..586c46d 100644
--- a/mir/generate/from_module.py
+++ b/mir/generate/from_module.py
@@ -9,7 +9,7 @@
from typing import Callable
-def migrations(repo_path: str):
+def migrations(repo_path: str) -> str:
"""Replaces old organization names in repository paths with new ones.\n
:param repo_path: Original repository path containing old organization names
:return: Updated repository path with new organization names"""
diff --git a/mir/generate/test.json b/mir/generate/test.json
new file mode 100644
index 0000000..2e8091a
--- /dev/null
+++ b/mir/generate/test.json
@@ -0,0 +1,4549 @@
+model_parameters={'num_channels': 'num_channels=3', 'embedding_size': 'embedding_size=64', 'hidden_sizes': 'hidden_sizes=[
+ 256,
+ 512,
+ 1024,
+ 2048
+ ]', 'depths': 'depths=[
+ 3,
+ 4,
+ 6,
+ 3
+ ]', 'layer_type': "layer_type='preactivation'", 'hidden_act': "hidden_act='relu'", 'global_padding': 'global_padding=None', 'num_groups': 'num_groups=32', 'drop_path_rate': 'drop_path_rate=0.0', 'embedding_dynamic_padding': 'embedding_dynamic_padding=False', 'output_stride': 'output_stride=32', 'width_factor': 'width_factor=1', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+},
+model_name='BitModel', library='transformers', import_path='transformers.models.bit'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 128256', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 6912', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 30', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 20', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 5', 'hidden_act': "hidden_act: Optional[str] = 'relu2'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 128000', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 128001', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[str
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='BitNetModel', library='transformers', import_path='transformers.models.bitnet'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=8008', 'max_position_embeddings': 'max_position_embeddings=128', 'encoder_layers': 'encoder_layers=2', 'encoder_ffn_dim': 'encoder_ffn_dim=10240', 'encoder_attention_heads': 'encoder_attention_heads=32', 'decoder_layers': 'decoder_layers=24', 'decoder_ffn_dim': 'decoder_ffn_dim=10240', 'decoder_attention_heads': 'decoder_attention_heads=32', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=2560', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=1', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'encoder_no_repeat_ngram_size': 'encoder_no_repeat_ngram_size=3', 'forced_eos_token_id': 'forced_eos_token_id=2'
+}, model_name='BlenderbotModel', library='transformers', import_path='transformers.models.blenderbot'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'add_prefix_space': 'add_prefix_space=True', 'vocab': 'vocab=None', 'merges': 'merges=None'
+}, model_name='BlenderbotTokenizer', library='transformers', import_path='transformers.models.blenderbot'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'max_position_embeddings': 'max_position_embeddings=512', 'encoder_layers': 'encoder_layers=8', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=8', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=512', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=1', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'forced_eos_token_id': 'forced_eos_token_id=2'
+}, model_name='BlenderbotSmallModel', library='transformers', import_path='transformers.models.blenderbot_small'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'merges_file': 'merges_file', 'bos_token': "bos_token='__start__'", 'eos_token': "eos_token='__end__'", 'unk_token': "unk_token='__unk__'", 'pad_token': "pad_token='__null__'"
+}, model_name='BlenderbotSmallTokenizer', library='transformers', import_path='transformers.models.blenderbot_small'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592', 'image_text_hidden_size': 'image_text_hidden_size=256', 'label_smoothing': 'label_smoothing=0.0'
+}, model_name='BlipModel', library='transformers', import_path='transformers.models.blip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'qformer_config': 'qformer_config=None', 'text_config': 'text_config=None', 'num_query_tokens': 'num_query_tokens=32', 'image_text_hidden_size': 'image_text_hidden_size=256', 'image_token_index': 'image_token_index=None'
+}, model_name='Blip2Model', library='transformers', import_path='transformers.models.blip_2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'cross_attention_frequency': 'cross_attention_frequency=2', 'encoder_hidden_size': 'encoder_hidden_size=1408', 'use_qformer_text_input': 'use_qformer_text_input=False'
+}, model_name='Blip2QFormerModel', library='transformers', import_path='transformers.models.blip_2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=250880', 'hidden_size': 'hidden_size=64', 'n_layer': 'n_layer=2', 'n_head': 'n_head=8', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'apply_residual_connection_post_layernorm': 'apply_residual_connection_post_layernorm=False', 'hidden_dropout': 'hidden_dropout=0.0', 'attention_dropout': 'attention_dropout=0.0', 'pretraining_tp': 'pretraining_tp=1', 'slow_but_exact': 'slow_but_exact=False'
+}, model_name='BloomModel', library='transformers', import_path='transformers.models.bloom'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 260', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'patch_in_forward': 'patch_in_forward: Optional[bool
+ ] = True', 'patch_size': 'patch_size: Optional[int
+ ] = 4', 'patching_mode': "patching_mode: Optional[str] = 'entropy'", 'patching_threshold': 'patching_threshold: Optional[float
+ ] = 1.335442066192627', 'patching_batch_size': 'patching_batch_size: Optional[int
+ ] = 1', 'max_patch_length': 'max_patch_length: Optional[int
+ ] = None', 'cross_attn_k': 'cross_attn_k: Optional[int
+ ] = 2', 'encoder_hash_byte_group_size': 'encoder_hash_byte_group_size: Optional[int
+ ] = None', 'encoder_hash_byte_group_vocab': 'encoder_hash_byte_group_vocab: Optional[int
+ ] = 500002', 'encoder_hash_byte_group_nb_functions': 'encoder_hash_byte_group_nb_functions: Optional[int
+ ] = 1', 'patcher_config': 'patcher_config: Optional[dict
+ ] = None', 'encoder_config': 'encoder_config: Optional[dict
+ ] = None', 'decoder_config': 'decoder_config: Optional[dict
+ ] = None', 'global_config': 'global_config: Optional[dict
+ ] = None', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='BltModel', library='transformers', import_path='transformers.models.blt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'share_cross_modal_transformer_layers': 'share_cross_modal_transformer_layers=True', 'hidden_act': "hidden_act='gelu'", 'hidden_size': 'hidden_size=768', 'initializer_factor': 'initializer_factor=1', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'share_link_tower_layers': 'share_link_tower_layers=False', 'link_tower_type': "link_tower_type='add'", 'num_attention_heads': 'num_attention_heads=12', 'num_hidden_layers': 'num_hidden_layers=6', 'tie_word_embeddings': 'tie_word_embeddings=False', 'init_layernorm_from_vision_encoder': 'init_layernorm_from_vision_encoder=False', 'text_config': 'text_config=None', 'vision_config': 'vision_config=None'
+}, model_name='BridgeTowerModel', library='transformers', import_path='transformers.models.bridgetower'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'dim_bbox': 'dim_bbox=8', 'bbox_scale': 'bbox_scale=100.0', 'n_relations': 'n_relations=1', 'classifier_dropout_prob': 'classifier_dropout_prob=0.1'
+}, model_name='BrosModel', library='transformers', import_path='transformers.models.bros'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='CamembertModel', library='transformers', import_path='transformers.models.camembert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'additional_special_tokens': 'additional_special_tokens=None', 'add_prefix_space': 'add_prefix_space=True', 'vocab_file': 'vocab_file=None', 'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None'
+}, model_name='CamembertTokenizer', library='transformers', import_path='transformers.models.camembert'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=16384', 'type_vocab_size': 'type_vocab_size=16', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=57344', 'eos_token_id': 'eos_token_id=57345', 'downsampling_rate': 'downsampling_rate=4', 'upsampling_kernel_size': 'upsampling_kernel_size=4', 'num_hash_functions': 'num_hash_functions=8', 'num_hash_buckets': 'num_hash_buckets=16384', 'local_transformer_stride': 'local_transformer_stride=128'
+}, model_name='CanineModel', library='transformers', import_path='transformers.models.canine'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'bos_token': "bos_token='\\ue000'", 'eos_token': "eos_token='\\ue001'", 'sep_token': "sep_token='\\ue001'", 'cls_token': "cls_token='\\ue000'", 'pad_token': "pad_token='\\x00'", 'mask_token': "mask_token='\\ue003'", 'add_prefix_space': 'add_prefix_space=False', 'model_max_length': 'model_max_length=2048'
+}, model_name='CanineTokenizer', library='transformers', import_path='transformers.models.canine'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 65536', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'hidden_act': "hidden_act: Optional[int] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[int
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'model_parallel_size': 'model_parallel_size: Optional[int
+ ] = 1', 'swin_norm': 'swin_norm: Optional[bool
+ ] = False', 'vq_config': 'vq_config: Optional[dict
+ ] = None', 'vocabulary_map': 'vocabulary_map: Optional[dict
+ ] = None', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False'
+}, model_name='ChameleonModel', library='transformers', import_path='transformers.models.chameleon'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='ChineseCLIPModel', library='transformers', import_path='transformers.models.chinese_clip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'projection_dim': 'projection_dim=512', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'patch_size': 'patch_size=32', 'hidden_act': "hidden_act='quick_gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-05', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02', 'initializer_factor': 'initializer_factor=1.0'
+}, model_name='ChineseCLIPVisionModel', library='transformers', import_path='transformers.models.chinese_clip'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'audio_config': 'audio_config=None', 'logit_scale_init_value': 'logit_scale_init_value=14.285714285714285', 'projection_dim': 'projection_dim=512', 'projection_hidden_act': "projection_hidden_act='relu'", 'initializer_factor': 'initializer_factor=1.0'
+}, model_name='ClapModel', library='transformers', import_path='transformers.models.clap'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='CLIPModel', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=49408', 'hidden_size': 'hidden_size=512', 'intermediate_size': 'intermediate_size=2048', 'projection_dim': 'projection_dim=512', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=8', 'max_position_embeddings': 'max_position_embeddings=77', 'hidden_act': "hidden_act='quick_gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-05', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02', 'initializer_factor': 'initializer_factor=1.0', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=49406', 'eos_token_id': 'eos_token_id=49407'
+}, model_name='CLIPTextModel', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'projection_dim': 'projection_dim=512', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'patch_size': 'patch_size=32', 'hidden_act': "hidden_act='quick_gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-05', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02', 'initializer_factor': 'initializer_factor=1.0'
+}, model_name='CLIPVisionModel', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592', 'extract_layers': 'extract_layers=[
+ 3,
+ 6,
+ 9
+ ]', 'reduce_dim': 'reduce_dim=64', 'decoder_num_attention_heads': 'decoder_num_attention_heads=4', 'decoder_attention_dropout': 'decoder_attention_dropout=0.0', 'decoder_hidden_act': "decoder_hidden_act='quick_gelu'", 'decoder_intermediate_size': 'decoder_intermediate_size=2048', 'conditional_layer': 'conditional_layer=0', 'use_complex_transposed_convolution': 'use_complex_transposed_convolution=False'
+}, model_name='CLIPSegModel', library='transformers', import_path='transformers.models.clipseg'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'speech_config': 'speech_config=None', 'decoder_config': 'decoder_config=None', 'projection_dim': 'projection_dim=768', 'logit_scale_init_value': 'logit_scale_init_value=2.6592', 'initializer_factor': 'initializer_factor=1.0'
+}, model_name='ClvpModelForConditionalGeneration', library='transformers', import_path='transformers.models.clvp'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'merges_file': 'merges_file', 'errors': "errors='replace'", 'unk_token': "unk_token='[UNK]'", 'bos_token': "bos_token='<|endoftext|>'", 'eos_token': "eos_token='[STOP]'", 'pad_token': "pad_token='[STOP]'", 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='ClvpTokenizer', library='transformers', import_path='transformers.models.clvp'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='LlamaModel', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50400', 'n_positions': 'n_positions=2048', 'n_ctx': 'n_ctx=2048', 'n_embd': 'n_embd=4096', 'n_layer': 'n_layer=28', 'n_head': 'n_head=16', 'rotary_dim': 'rotary_dim=64', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='gelu_new'", 'resid_pdrop': 'resid_pdrop=0.0', 'embd_pdrop': 'embd_pdrop=0.0', 'attn_pdrop': 'attn_pdrop=0.0', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='CodeGenModel', library='transformers', import_path='transformers.models.codegen'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 8192', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 22528', 'logit_scale': 'logit_scale: Optional[float
+ ] = 0.0625', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 40', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 5', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 255001', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'use_qk_norm': 'use_qk_norm: Optional[bool
+ ] = False'
+}, model_name='CohereModel', library='transformers', import_path='transformers.models.cohere'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = '<|END_OF_TURN_TOKEN|>'", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = ''", 'sep_token': "sep_token: str = ''", 'mask_token': "mask_token: str = ''", 'use_default_system_prompt': 'use_default_system_prompt: bool = False', 'add_prefix_space': 'add_prefix_space: bool = False'
+}, model_name='CohereTokenizer', library='transformers', import_path='transformers.models.cohere'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 8192', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 22528', 'logit_scale': 'logit_scale: Optional[float
+ ] = 0.0625', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 40', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 5', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 255001', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Cohere2Model', library='transformers', import_path='transformers.models.cohere2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = '<|END_OF_TURN_TOKEN|>'", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = ''", 'sep_token': "sep_token: str = ''", 'mask_token': "mask_token: str = ''", 'use_default_system_prompt': 'use_default_system_prompt: bool = False', 'add_prefix_space': 'add_prefix_space: bool = False'
+}, model_name='CohereTokenizer', library='transformers', import_path='transformers.models.cohere'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'downsample_factor': 'downsample_factor=2', 'image_token_id': 'image_token_id=255036', 'alignment_intermediate_size': 'alignment_intermediate_size=36864'
+}, model_name='Cohere2VisionModel', library='transformers', import_path='transformers.models.cohere2_vision'), ModelAttributes(model=, model_type='model', model_parameters={'use_timm_backbone': 'use_timm_backbone=True', 'backbone_config': 'backbone_config=None', 'num_channels': 'num_channels=3', 'num_queries': 'num_queries=300', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'init_xavier_std': 'init_xavier_std=1.0', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'backbone': "backbone='resnet50'", 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'backbone_kwargs': 'backbone_kwargs=None', 'dilation': 'dilation=False', 'class_cost': 'class_cost=2', 'bbox_cost': 'bbox_cost=5', 'giou_cost': 'giou_cost=2', 'mask_loss_coefficient': 'mask_loss_coefficient=1', 'dice_loss_coefficient': 'dice_loss_coefficient=1', 'cls_loss_coefficient': 'cls_loss_coefficient=2', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5', 'giou_loss_coefficient': 'giou_loss_coefficient=2', 'focal_alpha': 'focal_alpha=0.25'
+}, model_name='ConditionalDetrModel', library='transformers', import_path='transformers.models.conditional_detr'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'embedding_size': 'embedding_size=768', 'head_ratio': 'head_ratio=2', 'conv_kernel_size': 'conv_kernel_size=9', 'num_groups': 'num_groups=1', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='ConvBertModel', library='transformers', import_path='transformers.models.convbert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'patch_size': 'patch_size=4', 'num_stages': 'num_stages=4', 'hidden_sizes': 'hidden_sizes=None', 'depths': 'depths=None', 'hidden_act': "hidden_act='gelu'", 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'layer_scale_init_value': 'layer_scale_init_value=1e-06', 'drop_path_rate': 'drop_path_rate=0.0', 'image_size': 'image_size=224', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='ConvNextModel', library='transformers', import_path='transformers.models.convnext'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'patch_size': 'patch_size=4', 'num_stages': 'num_stages=4', 'hidden_sizes': 'hidden_sizes=None', 'depths': 'depths=None', 'hidden_act': "hidden_act='gelu'", 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'drop_path_rate': 'drop_path_rate=0.0', 'image_size': 'image_size=224', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='ConvNextV2Model', library='transformers', import_path='transformers.models.convnextv2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 30720', 'hidden_size': 'hidden_size: int = 4096', 'num_attention_heads': 'num_attention_heads: int = 32', 'dim_head': 'dim_head: int = 128', 'dim_ff': 'dim_ff: int = 10240', 'num_hidden_layers': 'num_hidden_layers: int = 48', 'dropout_p': 'dropout_p: int = 0.0', 'position_bias_num_buckets': 'position_bias_num_buckets: int = 512', 'position_bias_max_distance': 'position_bias_max_distance: int = 2048', 'eps': 'eps: int = 1e-06', 'init_std': 'init_std: float = 1.0', 'prompt_types': 'prompt_types: int = 32', 'prompt_length': 'prompt_length: int = 32', 'segment_types': 'segment_types: int = 32'
+}, model_name='CpmAntModel', library='transformers', import_path='transformers.models.cpmant'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bod_token': "bod_token=''", 'eod_token': "eod_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'pad_token': "pad_token=''", 'unk_token': "unk_token=''", 'line_token': "line_token=''", 'space_token': "space_token=''", 'padding_side': "padding_side='left'"
+}, model_name='CpmAntTokenizer', library='transformers', import_path='transformers.models.cpmant'), ModelAttributes(model=, model_type='model', model_parameters={'num_codebooks': 'num_codebooks: Optional[int
+ ] = 32', 'vocab_size': 'vocab_size: Optional[int
+ ] = 2051', 'text_vocab_size': 'text_vocab_size: Optional[int
+ ] = 128256', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 8192', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 16', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 128002', 'codebook_pad_token_id': 'codebook_pad_token_id: Optional[int
+ ] = 2050', 'codebook_eos_token_id': 'codebook_eos_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 128000', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = None', 'audio_token_id': 'audio_token_id: Optional[int
+ ] = 128002', 'audio_eos_token_id': 'audio_eos_token_id: Optional[int
+ ] = 128003', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'tie_codebooks_embeddings': 'tie_codebooks_embeddings: Optional[bool
+ ] = True', 'depth_decoder_config': 'depth_decoder_config: Optional[dict
+ ] = None', 'codec_config': 'codec_config: Optional[dict
+ ] = None'
+}, model_name='CsmForConditionalGeneration', library='transformers', import_path='transformers.models.csm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=246534', 'n_positions': 'n_positions=256', 'n_embd': 'n_embd=1280', 'dff': 'dff=8192', 'n_layer': 'n_layer=48', 'n_head': 'n_head=16', 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_range': 'initializer_range=0.02'
+}, model_name='CTRLModel', library='transformers', import_path='transformers.models.ctrl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'merges_file': 'merges_file', 'unk_token': "unk_token=''"
+}, model_name='CTRLTokenizer', library='transformers', import_path='transformers.models.ctrl'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'patch_sizes': 'patch_sizes=[
+ 7,
+ 3,
+ 3
+ ]', 'patch_stride': 'patch_stride=[
+ 4,
+ 2,
+ 2
+ ]', 'patch_padding': 'patch_padding=[
+ 2,
+ 1,
+ 1
+ ]', 'embed_dim': 'embed_dim=[
+ 64,
+ 192,
+ 384
+ ]', 'num_heads': 'num_heads=[
+ 1,
+ 3,
+ 6
+ ]', 'depth': 'depth=[
+ 1,
+ 2,
+ 10
+ ]', 'mlp_ratio': 'mlp_ratio=[
+ 4.0,
+ 4.0,
+ 4.0
+ ]', 'attention_drop_rate': 'attention_drop_rate=[
+ 0.0,
+ 0.0,
+ 0.0
+ ]', 'drop_rate': 'drop_rate=[
+ 0.0,
+ 0.0,
+ 0.0
+ ]', 'drop_path_rate': 'drop_path_rate=[
+ 0.0,
+ 0.0,
+ 0.1
+ ]', 'qkv_bias': 'qkv_bias=[True, True, True
+ ]', 'cls_token': 'cls_token=[False, False, True
+ ]', 'qkv_projection_method': "qkv_projection_method=['dw_bn', 'dw_bn', 'dw_bn']", 'kernel_qkv': 'kernel_qkv=[
+ 3,
+ 3,
+ 3
+ ]', 'padding_kv': 'padding_kv=[
+ 1,
+ 1,
+ 1
+ ]', 'stride_kv': 'stride_kv=[
+ 2,
+ 2,
+ 2
+ ]', 'padding_q': 'padding_q=[
+ 1,
+ 1,
+ 1
+ ]', 'stride_q': 'stride_q=[
+ 1,
+ 1,
+ 1
+ ]', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12'
+}, model_name='CvtModel', library='transformers', import_path='transformers.models.cvt'), ModelAttributes(model=, model_type='model', model_parameters={'n_head': [''
+ ]
+}, model_name='CwmModel', library='transformers', import_path='transformers.models.cwm'), ModelAttributes(model=, model_type='model', model_parameters={'initializer_range': 'initializer_range=0.01', 'initializer_bias_prior_prob': 'initializer_bias_prior_prob=None', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'batch_norm_eps': 'batch_norm_eps=1e-05', 'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'freeze_backbone_batch_norms': 'freeze_backbone_batch_norms=True', 'backbone_kwargs': 'backbone_kwargs=None', 'encoder_hidden_dim': 'encoder_hidden_dim=256', 'encoder_in_channels': 'encoder_in_channels=[
+ 512,
+ 1024,
+ 2048
+ ]', 'feat_strides': 'feat_strides=[
+ 8,
+ 16,
+ 32
+ ]', 'encoder_layers': 'encoder_layers=1', 'encoder_ffn_dim': 'encoder_ffn_dim=1024', 'encoder_attention_heads': 'encoder_attention_heads=8', 'dropout': 'dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'encode_proj_layers': 'encode_proj_layers=[
+ 2
+ ]', 'positional_encoding_temperature': 'positional_encoding_temperature=10000', 'encoder_activation_function': "encoder_activation_function='gelu'", 'activation_function': "activation_function='silu'", 'eval_size': 'eval_size=None', 'normalize_before': 'normalize_before=False', 'hidden_expansion': 'hidden_expansion=1.0', 'd_model': 'd_model=256', 'num_queries': 'num_queries=300', 'decoder_in_channels': 'decoder_in_channels=[
+ 256,
+ 256,
+ 256
+ ]', 'decoder_ffn_dim': 'decoder_ffn_dim=1024', 'num_feature_levels': 'num_feature_levels=3', 'decoder_n_points': 'decoder_n_points=4', 'decoder_layers': 'decoder_layers=6', 'decoder_attention_heads': 'decoder_attention_heads=8', 'decoder_activation_function': "decoder_activation_function='relu'", 'attention_dropout': 'attention_dropout=0.0', 'num_denoising': 'num_denoising=100', 'label_noise_ratio': 'label_noise_ratio=0.5', 'box_noise_scale': 'box_noise_scale=1.0', 'learn_initial_query': 'learn_initial_query=False', 'anchor_image_size': 'anchor_image_size=None', 'with_box_refine': 'with_box_refine=True', 'is_encoder_decoder': 'is_encoder_decoder=True', 'matcher_alpha': 'matcher_alpha=0.25', 'matcher_gamma': 'matcher_gamma=2.0', 'matcher_class_cost': 'matcher_class_cost=2.0', 'matcher_bbox_cost': 'matcher_bbox_cost=5.0', 'matcher_giou_cost': 'matcher_giou_cost=2.0', 'use_focal_loss': 'use_focal_loss=True', 'auxiliary_loss': 'auxiliary_loss=True', 'focal_loss_alpha': 'focal_loss_alpha=0.75', 'focal_loss_gamma': 'focal_loss_gamma=2.0', 'weight_loss_vfl': 'weight_loss_vfl=1.0', 'weight_loss_bbox': 'weight_loss_bbox=5.0', 'weight_loss_giou': 'weight_loss_giou=2.0', 'weight_loss_fgl': 'weight_loss_fgl=0.15', 'weight_loss_ddf': 'weight_loss_ddf=1.5', 'eos_coefficient': 'eos_coefficient=0.0001', 'eval_idx': 'eval_idx=-1', 'layer_scale': 'layer_scale=1', 'max_num_bins': 'max_num_bins=32', 'reg_scale': 'reg_scale=4.0', 'depth_mult': 'depth_mult=1.0', 'top_prob_values': 'top_prob_values=4', 'lqe_hidden_dim': 'lqe_hidden_dim=64', 'lqe_layers': 'lqe_layers=2', 'decoder_offset_scale': 'decoder_offset_scale=0.5', 'decoder_method': "decoder_method='default'", 'up': 'up=0.5'
+}, model_name='DFineModel', library='transformers', import_path='transformers.models.d_fine'), ModelAttributes(model=, model_type='model', model_parameters={'use_timm_backbone': 'use_timm_backbone=True', 'backbone_config': 'backbone_config=None', 'backbone': "backbone='resnet50'", 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'backbone_kwargs': 'backbone_kwargs=None', 'num_queries': 'num_queries=300', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='prelu'", 'hidden_size': 'hidden_size=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'init_xavier_std': 'init_xavier_std=1.0', 'auxiliary_loss': 'auxiliary_loss=False', 'dilation': 'dilation=False', 'class_cost': 'class_cost=2', 'bbox_cost': 'bbox_cost=5', 'giou_cost': 'giou_cost=2', 'cls_loss_coefficient': 'cls_loss_coefficient=2', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5', 'giou_loss_coefficient': 'giou_loss_coefficient=2', 'focal_alpha': 'focal_alpha=0.25', 'temperature_height': 'temperature_height=20', 'temperature_width': 'temperature_width=20', 'query_dim': 'query_dim=4', 'random_refpoints_xy': 'random_refpoints_xy=False', 'keep_query_pos': 'keep_query_pos=False', 'num_patterns': 'num_patterns=0', 'normalize_before': 'normalize_before=False', 'sine_position_embedding_scale': 'sine_position_embedding_scale=None', 'initializer_bias_prior_prob': 'initializer_bias_prior_prob=None'
+}, model_name='DabDetrModel', library='transformers', import_path='transformers.models.dab_detr'), ModelAttributes(model=, model_type='model', model_parameters={'encoder_hidden_size': 'encoder_hidden_size=64', 'downsampling_ratios': 'downsampling_ratios=[
+ 2,
+ 4,
+ 8,
+ 8
+ ]', 'decoder_hidden_size': 'decoder_hidden_size=1536', 'n_codebooks': 'n_codebooks=9', 'codebook_size': 'codebook_size=1024', 'codebook_dim': 'codebook_dim=8', 'quantizer_dropout': 'quantizer_dropout=0', 'commitment_loss_weight': 'commitment_loss_weight=0.25', 'codebook_loss_weight': 'codebook_loss_weight=1.0', 'sampling_rate': 'sampling_rate=16000'
+}, model_name='DacModel', library='transformers', import_path='transformers.models.dac'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'conv_pos_kernel_size': 'conv_pos_kernel_size=19', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=5', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'ctc_loss_reduction': "ctc_loss_reduction='sum'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'add_adapter': 'add_adapter=False', 'adapter_kernel_size': 'adapter_kernel_size=3', 'adapter_stride': 'adapter_stride=2', 'num_adapter_layers': 'num_adapter_layers=3', 'output_hidden_size': 'output_hidden_size=None'
+}, model_name='Data2VecAudioModel', library='transformers', import_path='transformers.models.data2vec'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'word_delimiter_token': "word_delimiter_token='|'", 'replace_word_delimiter_char': "replace_word_delimiter_char=' '", 'do_lower_case': 'do_lower_case=False', 'target_lang': 'target_lang=None'
+}, model_name='Wav2Vec2CTCTokenizer', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='Data2VecTextModel', library='transformers', import_path='transformers.models.data2vec'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'use_mask_token': 'use_mask_token=False', 'use_absolute_position_embeddings': 'use_absolute_position_embeddings=False', 'use_relative_position_bias': 'use_relative_position_bias=False', 'use_shared_relative_position_bias': 'use_shared_relative_position_bias=False', 'layer_scale_init_value': 'layer_scale_init_value=0.1', 'drop_path_rate': 'drop_path_rate=0.1', 'use_mean_pooling': 'use_mean_pooling=True', 'out_indices': 'out_indices=[
+ 3,
+ 5,
+ 7,
+ 11
+ ]', 'pool_scales': 'pool_scales=[
+ 1,
+ 2,
+ 3,
+ 6
+ ]', 'use_auxiliary_head': 'use_auxiliary_head=True', 'auxiliary_loss_weight': 'auxiliary_loss_weight=0.4', 'auxiliary_channels': 'auxiliary_channels=256', 'auxiliary_num_convs': 'auxiliary_num_convs=1', 'auxiliary_concat_input': 'auxiliary_concat_input=False', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255'
+}, model_name='Data2VecVisionModel', library='transformers', import_path='transformers.models.data2vec'), ModelAttributes(model=, model_type='model', model_parameters={'d_model': 'd_model: Optional[int
+ ] = 2048', 'n_heads': 'n_heads: Optional[int
+ ] = 16', 'n_layers': 'n_layers: Optional[int
+ ] = 24', 'max_seq_len': 'max_seq_len: Optional[int
+ ] = 2048', 'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'resid_pdrop': 'resid_pdrop: Optional[float
+ ] = 0.0', 'emb_pdrop': 'emb_pdrop: Optional[float
+ ] = 0.0', 'attn_config': 'attn_config: Optional[transformers.models.dbrx.configuration_dbrx.DbrxAttentionConfig
+ ] = None', 'ffn_config': 'ffn_config: Optional[transformers.models.dbrx.configuration_dbrx.DbrxFFNConfig
+ ] = None', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='DbrxModel', library='transformers', import_path='transformers.models.dbrx'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-07', 'relative_attention': 'relative_attention=False', 'max_relative_positions': 'max_relative_positions=-1', 'pad_token_id': 'pad_token_id=0', 'position_biased_input': 'position_biased_input=True', 'pos_att_type': 'pos_att_type=None', 'pooler_dropout': 'pooler_dropout=0', 'pooler_hidden_act': "pooler_hidden_act='gelu'", 'legacy': 'legacy=True'
+}, model_name='DebertaModel', library='transformers', import_path='transformers.models.deberta'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors='replace'", 'bos_token': "bos_token='[CLS]'", 'eos_token': "eos_token='[SEP]'", 'sep_token': "sep_token='[SEP]'", 'cls_token': "cls_token='[CLS]'", 'unk_token': "unk_token='[UNK]'", 'pad_token': "pad_token='[PAD]'", 'mask_token': "mask_token='[MASK]'", 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='DebertaTokenizer', library='transformers', import_path='transformers.models.deberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=128100', 'hidden_size': 'hidden_size=1536', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=24', 'intermediate_size': 'intermediate_size=6144', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-07', 'relative_attention': 'relative_attention=False', 'max_relative_positions': 'max_relative_positions=-1', 'pad_token_id': 'pad_token_id=0', 'position_biased_input': 'position_biased_input=True', 'pos_att_type': 'pos_att_type=None', 'pooler_dropout': 'pooler_dropout=0', 'pooler_hidden_act': "pooler_hidden_act='gelu'", 'legacy': 'legacy=True'
+}, model_name='DebertaV2Model', library='transformers', import_path='transformers.models.deberta_v2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'do_lower_case': 'do_lower_case=False', 'split_by_punct': 'split_by_punct=False', 'bos_token': "bos_token='[CLS]'", 'eos_token': "eos_token='[SEP]'", 'unk_token': "unk_token='[UNK]'", 'sep_token': "sep_token='[SEP]'", 'pad_token': "pad_token='[PAD]'", 'cls_token': "cls_token='[CLS]'", 'mask_token': "mask_token='[MASK]'", 'add_prefix_space': 'add_prefix_space=True', 'unk_id': 'unk_id=1'
+}, model_name='DebertaV2Tokenizer', library='transformers', import_path='transformers.models.deberta_v2'), ModelAttributes(model=, model_type='model', model_parameters={'state_dim': 'state_dim=17', 'act_dim': 'act_dim=4', 'hidden_size': 'hidden_size=128', 'max_ep_len': 'max_ep_len=4096', 'action_tanh': 'action_tanh=True', 'vocab_size': 'vocab_size=1', 'n_positions': 'n_positions=1024', 'n_layer': 'n_layer=3', 'n_head': 'n_head=1', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='relu'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'scale_attn_weights': 'scale_attn_weights=True', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'scale_attn_by_inverse_layer_idx': 'scale_attn_by_inverse_layer_idx=False', 'reorder_and_upcast_attn': 'reorder_and_upcast_attn=False'
+}, model_name='DecisionTransformerModel', library='transformers', import_path='transformers.models.decision_transformer'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'first_k_dense_replace': 'first_k_dense_replace: Optional[int
+ ] = 0', 'kv_lora_rank': 'kv_lora_rank: Optional[int
+ ] = 512', 'q_lora_rank': 'q_lora_rank: Optional[int
+ ] = 1536', 'n_group': 'n_group: Optional[int
+ ] = None', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = 64', 'n_shared_experts': 'n_shared_experts: Optional[int
+ ] = 2', 'qk_nope_head_dim': 'qk_nope_head_dim: Optional[int
+ ] = 128', 'qk_rope_head_dim': 'qk_rope_head_dim: Optional[int
+ ] = 64', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 1.0', 'topk_group': 'topk_group: Optional[int
+ ] = None', 'topk_method': "topk_method: Optional[str] = 'greedy'", 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False', 'v_head_dim': 'v_head_dim: Optional[int
+ ] = 128', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = None', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1407'
+}, model_name='DeepseekV2Model', library='transformers', import_path='transformers.models.deepseek_v2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 129280', 'hidden_size': 'hidden_size: Optional[int
+ ] = 7168', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 18432', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 2048', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 61', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 128', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 128', 'n_shared_experts': 'n_shared_experts: Optional[int
+ ] = 1', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = 256', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 2.5', 'kv_lora_rank': 'kv_lora_rank: Optional[int
+ ] = 512', 'q_lora_rank': 'q_lora_rank: Optional[int
+ ] = 1536', 'qk_rope_head_dim': 'qk_rope_head_dim: Optional[int
+ ] = 64', 'v_head_dim': 'v_head_dim: Optional[int
+ ] = 128', 'qk_nope_head_dim': 'qk_nope_head_dim: Optional[int
+ ] = 128', 'n_group': 'n_group: Optional[int
+ ] = 8', 'topk_group': 'topk_group: Optional[int
+ ] = 4', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 8', 'first_k_dense_replace': 'first_k_dense_replace: Optional[int
+ ] = 3', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = True', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'rope_interleave': 'rope_interleave: Optional[bool
+ ] = True', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='DeepseekV3Model', library='transformers', import_path='transformers.models.deepseek_v3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config: Optional[transformers.models.auto.configuration_auto.AutoConfig
+ ] = None', 'vision_config': 'vision_config: Optional[transformers.models.auto.configuration_auto.AutoConfig
+ ] = None', 'image_token_id': 'image_token_id: int = 100015'
+}, model_name='DeepseekVLModel', library='transformers', import_path='transformers.models.deepseek_vl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config: Optional[transformers.models.auto.configuration_auto.AutoConfig
+ ] = None', 'vision_config': 'vision_config: Optional[transformers.models.auto.configuration_auto.AutoConfig
+ ] = None', 'high_res_vision_config': 'high_res_vision_config: Optional[transformers.models.auto.configuration_auto.AutoConfig
+ ] = None', 'image_token_id': 'image_token_id: int = 100015'
+}, model_name='DeepseekVLHybridModel', library='transformers', import_path='transformers.models.deepseek_vl_hybrid'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'use_timm_backbone': 'use_timm_backbone=True', 'backbone_config': 'backbone_config=None', 'num_channels': 'num_channels=3', 'num_queries': 'num_queries=300', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=1024', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=1024', 'decoder_attention_heads': 'decoder_attention_heads=8', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'init_xavier_std': 'init_xavier_std=1.0', 'return_intermediate': 'return_intermediate=True', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'backbone': "backbone='resnet50'", 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'backbone_kwargs': 'backbone_kwargs=None', 'dilation': 'dilation=False', 'num_feature_levels': 'num_feature_levels=4', 'encoder_n_points': 'encoder_n_points=4', 'decoder_n_points': 'decoder_n_points=4', 'two_stage': 'two_stage=False', 'two_stage_num_proposals': 'two_stage_num_proposals=300', 'with_box_refine': 'with_box_refine=False', 'class_cost': 'class_cost=1', 'bbox_cost': 'bbox_cost=5', 'giou_cost': 'giou_cost=2', 'mask_loss_coefficient': 'mask_loss_coefficient=1', 'dice_loss_coefficient': 'dice_loss_coefficient=1', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5', 'giou_loss_coefficient': 'giou_loss_coefficient=2', 'eos_coefficient': 'eos_coefficient=0.1', 'focal_alpha': 'focal_alpha=0.25', 'disable_custom_kernels': 
'disable_custom_kernels=False'
+}, model_name='DeformableDetrModel', library='transformers', import_path='transformers.models.deformable_detr'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'encoder_stride': 'encoder_stride=16', 'pooler_output_size': 'pooler_output_size=None', 'pooler_act': "pooler_act='tanh'"
+}, model_name='DeiTModel', library='transformers', import_path='transformers.models.deit'), ModelAttributes(model=, model_type='model', model_parameters={'fusion_hidden_size': 'fusion_hidden_size=256', 'patch_size': 'patch_size=384', 'initializer_range': 'initializer_range=0.02', 'intermediate_hook_ids': 'intermediate_hook_ids=[
+ 11,
+ 5
+ ]', 'intermediate_feature_dims': 'intermediate_feature_dims=[
+ 256,
+ 256
+ ]', 'scaled_images_ratios': 'scaled_images_ratios=[
+ 0.25,
+ 0.5,
+ 1
+ ]', 'scaled_images_overlap_ratios': 'scaled_images_overlap_ratios=[
+ 0.0,
+ 0.5,
+ 0.25
+ ]', 'scaled_images_feature_dims': 'scaled_images_feature_dims=[
+ 1024,
+ 1024,
+ 512
+ ]', 'merge_padding_value': 'merge_padding_value=3', 'use_batch_norm_in_fusion_residual': 'use_batch_norm_in_fusion_residual=False', 'use_bias_in_fusion_residual': 'use_bias_in_fusion_residual=True', 'use_fov_model': 'use_fov_model=False', 'num_fov_head_layers': 'num_fov_head_layers=2', 'image_model_config': 'image_model_config=None', 'patch_model_config': 'patch_model_config=None', 'fov_model_config': 'fov_model_config=None'
+}, model_name='DepthProModel', library='transformers', import_path='transformers.models.depth_pro'), ModelAttributes(model=, model_type='model', model_parameters={'use_timm_backbone': 'use_timm_backbone=True', 'backbone_config': 'backbone_config=None', 'num_channels': 'num_channels=3', 'num_queries': 'num_queries=100', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'init_xavier_std': 'init_xavier_std=1.0', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'backbone': "backbone='resnet50'", 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'backbone_kwargs': 'backbone_kwargs=None', 'dilation': 'dilation=False', 'class_cost': 'class_cost=1', 'bbox_cost': 'bbox_cost=5', 'giou_cost': 'giou_cost=2', 'mask_loss_coefficient': 'mask_loss_coefficient=1', 'dice_loss_coefficient': 'dice_loss_coefficient=1', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5', 'giou_loss_coefficient': 'giou_loss_coefficient=2', 'eos_coefficient': 'eos_coefficient=0.1'
+}, model_name='DetrModel', library='transformers', import_path='transformers.models.detr'), ModelAttributes(model=, model_type='model', model_parameters={'encoder_config': 'encoder_config: Optional[transformers.models.dia.configuration_dia.DiaEncoderConfig
+ ] = None', 'decoder_config': 'decoder_config: Optional[transformers.models.dia.configuration_dia.DiaDecoderConfig
+ ] = None', 'norm_eps': 'norm_eps: float = 1e-05', 'is_encoder_decoder': 'is_encoder_decoder: bool = True', 'pad_token_id': 'pad_token_id: int = 1025', 'eos_token_id': 'eos_token_id: int = 1024', 'bos_token_id': 'bos_token_id: int = 1026', 'delay_pattern': 'delay_pattern: Optional[list[int
+ ]
+ ] = None', 'initializer_range': 'initializer_range: float = 0.02'
+}, model_name='DiaModel', library='transformers', import_path='transformers.models.dia'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'pad_token': "pad_token: Optional[str] = ''", 'unk_token': "unk_token: Optional[str] = ''", 'max_length': 'max_length: Optional[int
+ ] = 1024', 'offset': 'offset: int = 0'
+}, model_name='DiaTokenizer', library='transformers', import_path='transformers.models.dia'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 8192', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 16', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'lambda_std_dev': 'lambda_std_dev: Optional[float
+ ] = 0.1', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='DiffLlamaModel', library='transformers', import_path='transformers.models.diffllama'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=64', 'depths': 'depths=[
+ 3,
+ 4,
+ 6,
+ 5
+ ]', 'num_heads': 'num_heads=[
+ 2,
+ 4,
+ 8,
+ 16
+ ]', 'kernel_size': 'kernel_size=7', 'dilations': 'dilations=[
+ [
+ 1,
+ 8,
+ 1
+ ],
+ [
+ 1,
+ 4,
+ 1,
+ 4
+ ],
+ [
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2
+ ],
+ [
+ 1,
+ 1,
+ 1,
+ 1,
+ 1
+ ]
+ ]', 'mlp_ratio': 'mlp_ratio=3.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'layer_scale_init_value': 'layer_scale_init_value=0.0', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='DinatModel', library='transformers', import_path='transformers.models.dinat'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'mlp_ratio': 'mlp_ratio=4', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=224', 'patch_size': 'patch_size=14', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'layerscale_value': 'layerscale_value=1.0', 'drop_path_rate': 'drop_path_rate=0.0', 'use_swiglu_ffn': 'use_swiglu_ffn=False', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None', 'apply_layernorm': 'apply_layernorm=True', 'reshape_hidden_states': 'reshape_hidden_states=True', 'use_mask_token': 'use_mask_token=True'
+}, model_name='Dinov2Model', library='transformers', import_path='transformers.models.dinov2'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'mlp_ratio': 'mlp_ratio=4', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'layerscale_value': 'layerscale_value=1.0', 'drop_path_rate': 'drop_path_rate=0.0', 'use_swiglu_ffn': 'use_swiglu_ffn=False', 'num_register_tokens': 'num_register_tokens=4', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None', 'apply_layernorm': 'apply_layernorm=True', 'reshape_hidden_states': 'reshape_hidden_states=True'
+}, model_name='Dinov2WithRegistersModel', library='transformers', import_path='transformers.models.dinov2_with_registers'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels: int = 3', 'hidden_sizes': 'hidden_sizes: Optional[list[int
+ ]
+ ] = None', 'depths': 'depths: Optional[list[int
+ ]
+ ] = None', 'hidden_act': "hidden_act: str = 'gelu'", 'initializer_range': 'initializer_range: float = 0.02', 'layer_norm_eps': 'layer_norm_eps: float = 1e-06', 'layer_scale_init_value': 'layer_scale_init_value: float = 1e-06', 'drop_path_rate': 'drop_path_rate: float = 0.0', 'image_size': 'image_size: int = 224', 'out_features': 'out_features: Optional[list[str
+ ]
+ ] = None', 'out_indices': 'out_indices: Optional[list[int
+ ]
+ ] = None'
+}, model_name='DINOv3ConvNextModel', library='transformers', import_path='transformers.models.dinov3_convnext'), ModelAttributes(model=, model_type='model', model_parameters={'patch_size': 'patch_size: int = 16', 'hidden_size': 'hidden_size: int = 384', 'intermediate_size': 'intermediate_size: int = 1536', 'num_hidden_layers': 'num_hidden_layers: int = 12', 'num_attention_heads': 'num_attention_heads: int = 6', 'hidden_act': "hidden_act: str = 'gelu'", 'attention_dropout': 'attention_dropout: float = 0.0', 'initializer_range': 'initializer_range: float = 0.02', 'layer_norm_eps': 'layer_norm_eps: float = 1e-05', 'rope_theta': 'rope_theta: float = 100.0', 'image_size': 'image_size: int = 224', 'num_channels': 'num_channels: int = 3', 'query_bias': 'query_bias: bool = True', 'key_bias': 'key_bias: bool = False', 'value_bias': 'value_bias: bool = True', 'proj_bias': 'proj_bias: bool = True', 'mlp_bias': 'mlp_bias: bool = True', 'layerscale_value': 'layerscale_value: float = 1.0', 'drop_path_rate': 'drop_path_rate: float = 0.0', 'use_gated_mlp': 'use_gated_mlp: bool = False', 'num_register_tokens': 'num_register_tokens: int = 0', 'pos_embed_shift': 'pos_embed_shift: Optional[float
+ ] = None', 'pos_embed_jitter': 'pos_embed_jitter: Optional[float
+ ] = None', 'pos_embed_rescale': 'pos_embed_rescale: Optional[float
+ ] = 2.0', 'out_features': 'out_features: Optional[list[str
+ ]
+ ] = None', 'out_indices': 'out_indices: Optional[list[int
+ ]
+ ] = None', 'apply_layernorm': 'apply_layernorm: bool = True', 'reshape_hidden_states': 'reshape_hidden_states: bool = True'
+}, model_name='DINOv3ViTModel', library='transformers', import_path='transformers.models.dinov3_vit'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'max_position_embeddings': 'max_position_embeddings=512', 'sinusoidal_pos_embds': 'sinusoidal_pos_embds=False', 'n_layers': 'n_layers=6', 'n_heads': 'n_heads=12', 'dim': 'dim=768', 'hidden_dim': 'hidden_dim=3072', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation': "activation='gelu'", 'initializer_range': 'initializer_range=0.02', 'qa_dropout': 'qa_dropout=0.1', 'seq_classif_dropout': 'seq_classif_dropout=0.2', 'pad_token_id': 'pad_token_id=0'
+}, model_name='DistilBertModel', library='transformers', import_path='transformers.models.distilbert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32768', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1024', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 2048', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 8', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'keep_window_size': 'keep_window_size: Optional[int
+ ] = 2048', 'is_moe': 'is_moe: Optional[bool
+ ] = False', 'num_experts': 'num_experts: Optional[int
+ ] = 16384', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 64', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001'
+}, model_name='DogeModel', library='transformers', import_path='transformers.models.doge'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=96', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'num_heads': 'num_heads=[
+ 3,
+ 6,
+ 12,
+ 24
+ ]', 'window_size': 'window_size=7', 'mlp_ratio': 'mlp_ratio=4.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'use_absolute_embeddings': 'use_absolute_embeddings=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05'
+}, model_name='DonutSwinModel', library='transformers', import_path='transformers.models.donut'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 152064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4608', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 10944', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1408', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 62', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'n_shared_experts': 'n_shared_experts: Optional[int
+ ] = None', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = None', 'n_group': 'n_group: Optional[int
+ ] = 1', 'topk_group': 'topk_group: Optional[int
+ ] = 1', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = None', 'first_k_dense_replace': 'first_k_dense_replace: Optional[int
+ ] = 0', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 1.0', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 62', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Dots1Model', library='transformers', import_path='transformers.models.dots1'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'projection_dim': 'projection_dim: int = 0'
+}, model_name='DPRQuestionEncoder', library='transformers', import_path='transformers.models.dpr'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='DPRQuestionEncoderTokenizerFast', library='transformers', import_path='transformers.models.dpr'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=384', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'is_hybrid': 'is_hybrid=False', 'qkv_bias': 'qkv_bias=True', 'backbone_out_indices': 'backbone_out_indices=[
+ 2,
+ 5,
+ 8,
+ 11
+ ]', 'readout_type': "readout_type='project'", 'reassemble_factors': 'reassemble_factors=[
+ 4,
+ 2,
+ 1,
+ 0.5
+ ]', 'neck_hidden_sizes': 'neck_hidden_sizes=[
+ 96,
+ 192,
+ 384,
+ 768
+ ]', 'fusion_hidden_size': 'fusion_hidden_size=256', 'head_in_index': 'head_in_index=-1', 'use_batch_norm_in_fusion_residual': 'use_batch_norm_in_fusion_residual=False', 'use_bias_in_fusion_residual': 'use_bias_in_fusion_residual=None', 'add_projection': 'add_projection=False', 'use_auxiliary_head': 'use_auxiliary_head=True', 'auxiliary_loss_weight': 'auxiliary_loss_weight=0.4', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255', 'semantic_classifier_dropout': 'semantic_classifier_dropout=0.1', 'backbone_featmap_shape': 'backbone_featmap_shape=[
+ 1,
+ 1024,
+ 24,
+ 24
+ ]', 'neck_ignore_stages': 'neck_ignore_stages=[
+ 0,
+ 1
+ ]', 'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'backbone_kwargs': 'backbone_kwargs=None', 'pooler_output_size': 'pooler_output_size=None', 'pooler_act': "pooler_act='tanh'"
+}, model_name='DPTModel', library='transformers', import_path='transformers.models.dpt'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='EdgeTamModel', library='transformers', import_path='transformers.models.edgetam'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02', 'num_maskmem': 'num_maskmem=7', 'image_size': 'image_size=1024', 'sigmoid_scale_for_mem_enc': 'sigmoid_scale_for_mem_enc=20.0', 'sigmoid_bias_for_mem_enc': 'sigmoid_bias_for_mem_enc=-10.0', 'enable_occlusion_spatial_embedding': 'enable_occlusion_spatial_embedding=True', 'multimask_output_in_sam': 'multimask_output_in_sam=True', 'multimask_min_pt_num': 'multimask_min_pt_num=0', 'multimask_max_pt_num': 'multimask_max_pt_num=1', 'multimask_output_for_tracking': 'multimask_output_for_tracking=True', 'max_object_pointers_in_encoder': 'max_object_pointers_in_encoder=16', 'max_cond_frame_num': 'max_cond_frame_num=-1', 'enable_temporal_pos_encoding_for_object_pointers': 'enable_temporal_pos_encoding_for_object_pointers=True', 'memory_attention_hidden_size': 'memory_attention_hidden_size=256', 'memory_attention_num_layers': 'memory_attention_num_layers=2', 'memory_attention_num_attention_heads': 'memory_attention_num_attention_heads=1', 'memory_attention_downsample_rate': 'memory_attention_downsample_rate=1', 'memory_attention_mlp_hidden_size': 'memory_attention_mlp_hidden_size=2048', 'memory_attention_mlp_hidden_act': "memory_attention_mlp_hidden_act='relu'", 'memory_attention_dropout': 'memory_attention_dropout=0.1', 'memory_attention_rope_theta': 'memory_attention_rope_theta=10000', 'memory_attention_rope_feat_sizes': 'memory_attention_rope_feat_sizes=None', 'memory_attention_rope_k_sizes': 'memory_attention_rope_k_sizes=None', 'memory_attention_rope_dropout': 'memory_attention_rope_dropout=0.1', 'perceiver_resampler_num_latents': 'perceiver_resampler_num_latents=256', 'perceiver_resampler_num_latents_2d': 
'perceiver_resampler_num_latents_2d=256', 'perceiver_resampler_hidden_size': 'perceiver_resampler_hidden_size=64', 'perceiver_resampler_mlp_intermediate_size': 'perceiver_resampler_mlp_intermediate_size=256', 'perceiver_resampler_num_attention_heads': 'perceiver_resampler_num_attention_heads=1', 'perceiver_resampler_attention_head_dim': 'perceiver_resampler_attention_head_dim=64', 'perceiver_resampler_num_layers': 'perceiver_resampler_num_layers=2', 'perceiver_resampler_hidden_dropout': 'perceiver_resampler_hidden_dropout=0.0', 'perceiver_resampler_attention_dropout': 'perceiver_resampler_attention_dropout=0.0', 'memory_encoder_hidden_size': 'memory_encoder_hidden_size=256', 'memory_encoder_output_channels': 'memory_encoder_output_channels=64', 'mask_downsampler_embed_dim': 'mask_downsampler_embed_dim=256', 'memory_fuser_intermediate_dim': 'memory_fuser_intermediate_dim=1024', 'mask_downsampler_kernel_size': 'mask_downsampler_kernel_size=3', 'mask_downsampler_stride': 'mask_downsampler_stride=2', 'mask_downsampler_padding': 'mask_downsampler_padding=1', 'mask_downsampler_total_stride': 'mask_downsampler_total_stride=16', 'mask_downsampler_hidden_act': "mask_downsampler_hidden_act='gelu'", 'memory_fuser_num_layers': 'memory_fuser_num_layers=2', 'memory_fuser_embed_dim': 'memory_fuser_embed_dim=256', 'memory_fuser_kernel_size': 'memory_fuser_kernel_size=7', 'memory_fuser_padding': 'memory_fuser_padding=3', 'memory_fuser_layer_scale_init_value': 'memory_fuser_layer_scale_init_value=1e-06', 'memory_fuser_hidden_act': "memory_fuser_hidden_act='gelu'"
+}, model_name='EdgeTamVideoModel', library='transformers', import_path='transformers.models.edgetam_video'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'backbone_channel_list': 'backbone_channel_list=None', 'backbone_feature_sizes': 'backbone_feature_sizes=None', 'fpn_hidden_size': 'fpn_hidden_size=256', 'fpn_kernel_size': 'fpn_kernel_size=1', 'fpn_stride': 'fpn_stride=1', 'fpn_padding': 'fpn_padding=0', 'fpn_top_down_levels': 'fpn_top_down_levels=None', 'num_feature_levels': 'num_feature_levels=3', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'initializer_range': 'initializer_range=0.02'
+}, model_name='EdgeTamVisionModel', library='transformers', import_path='transformers.models.edgetam'), ModelAttributes(model=, model_type='model', model_parameters={'stage_num_blocks': 'stage_num_blocks: Optional[list[int
+ ]
+ ] = None', 'out_features': 'out_features: Optional[list[int
+ ]
+ ] = None', 'stage_stride': 'stage_stride: Optional[list[int
+ ]
+ ] = None', 'hidden_size': 'hidden_size: int = 256', 'activation_function': "activation_function: str = 'relu'", 'q_aggregation_kernel_size': 'q_aggregation_kernel_size: int = 4', 'kv_aggregation_kernel_size': 'kv_aggregation_kernel_size: int = 4', 'q_aggregation_stride': 'q_aggregation_stride: int = 4', 'kv_aggregation_stride': 'kv_aggregation_stride: int = 4', 'num_attention_layers': 'num_attention_layers: int = 4', 'num_attention_heads': 'num_attention_heads: int = 8', 'attention_dropout': 'attention_dropout: float = 0.0', 'attention_bias': 'attention_bias: bool = False', 'mlp_activation_function': "mlp_activation_function: str = 'leaky_relu'", 'coarse_matching_skip_softmax': 'coarse_matching_skip_softmax: bool = False', 'coarse_matching_threshold': 'coarse_matching_threshold: float = 0.2', 'coarse_matching_temperature': 'coarse_matching_temperature: float = 0.1', 'coarse_matching_border_removal': 'coarse_matching_border_removal: int = 2', 'fine_kernel_size': 'fine_kernel_size: int = 8', 'batch_norm_eps': 'batch_norm_eps: float = 1e-05', 'rope_parameters': 'rope_parameters: Optional[dict
+ ] = None', 'fine_matching_slice_dim': 'fine_matching_slice_dim: int = 8', 'fine_matching_regress_temperature': 'fine_matching_regress_temperature: float = 10.0', 'initializer_range': 'initializer_range: float = 0.02'
+}, model_name='EfficientLoFTRModel', library='transformers', import_path='transformers.models.efficientloftr'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels: int = 3', 'image_size': 'image_size: int = 600', 'width_coefficient': 'width_coefficient: float = 2.0', 'depth_coefficient': 'depth_coefficient: float = 3.1', 'depth_divisor': 'depth_divisor: int = 8', 'kernel_sizes': 'kernel_sizes: list[int
+ ] = [
+ 3,
+ 3,
+ 5,
+ 3,
+ 5,
+ 5,
+ 3
+ ]', 'in_channels': 'in_channels: list[int
+ ] = [
+ 32,
+ 16,
+ 24,
+ 40,
+ 80,
+ 112,
+ 192
+ ]', 'out_channels': 'out_channels: list[int
+ ] = [
+ 16,
+ 24,
+ 40,
+ 80,
+ 112,
+ 192,
+ 320
+ ]', 'depthwise_padding': 'depthwise_padding: list[int
+ ] = []', 'strides': 'strides: list[int
+ ] = [
+ 1,
+ 2,
+ 2,
+ 2,
+ 1,
+ 2,
+ 1
+ ]', 'num_block_repeats': 'num_block_repeats: list[int
+ ] = [
+ 1,
+ 2,
+ 2,
+ 3,
+ 3,
+ 4,
+ 1
+ ]', 'expand_ratios': 'expand_ratios: list[int
+ ] = [
+ 1,
+ 6,
+ 6,
+ 6,
+ 6,
+ 6,
+ 6
+ ]', 'squeeze_expansion_ratio': 'squeeze_expansion_ratio: float = 0.25', 'hidden_act': "hidden_act: str = 'swish'", 'hidden_dim': 'hidden_dim: int = 2560', 'pooling_type': "pooling_type: str = 'mean'", 'initializer_range': 'initializer_range: float = 0.02', 'batch_norm_eps': 'batch_norm_eps: float = 0.001', 'batch_norm_momentum': 'batch_norm_momentum: float = 0.99', 'dropout_rate': 'dropout_rate: float = 0.5', 'drop_connect_rate': 'drop_connect_rate: float = 0.2'
+}, model_name='EfficientNetModel', library='transformers', import_path='transformers.models.efficientnet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'embedding_size': 'embedding_size=128', 'hidden_size': 'hidden_size=256', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=4', 'intermediate_size': 'intermediate_size=1024', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'summary_type': "summary_type='first'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': "summary_activation='gelu'", 'summary_last_dropout': 'summary_last_dropout=0.1', 'pad_token_id': 'pad_token_id=0', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='ElectraModel', library='transformers', import_path='transformers.models.electra'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vq_config': 'vq_config: Union[dict, transformers.models.emu3.configuration_emu3.Emu3VQVAEConfig
+ ] = None', 'text_config': 'text_config: Union[dict, transformers.models.emu3.configuration_emu3.Emu3TextConfig
+ ] = None', 'vocabulary_map': 'vocabulary_map: Optional[dict[int, int
+ ]
+ ] = None'
+}, model_name='Emu3Model', library='transformers', import_path='transformers.models.emu3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'target_bandwidths': 'target_bandwidths=[
+ 1.5,
+ 3.0,
+ 6.0,
+ 12.0,
+ 24.0
+ ]', 'sampling_rate': 'sampling_rate=24000', 'audio_channels': 'audio_channels=1', 'normalize': 'normalize=False', 'chunk_length_s': 'chunk_length_s=None', 'overlap': 'overlap=None', 'hidden_size': 'hidden_size=128', 'num_filters': 'num_filters=32', 'num_residual_layers': 'num_residual_layers=1', 'upsampling_ratios': 'upsampling_ratios=[
+ 8,
+ 5,
+ 4,
+ 2
+ ]', 'norm_type': "norm_type='weight_norm'", 'kernel_size': 'kernel_size=7', 'last_kernel_size': 'last_kernel_size=7', 'residual_kernel_size': 'residual_kernel_size=3', 'dilation_growth_rate': 'dilation_growth_rate=2', 'use_causal_conv': 'use_causal_conv=True', 'pad_mode': "pad_mode='reflect'", 'compress': 'compress=2', 'num_lstm_layers': 'num_lstm_layers=2', 'trim_right_ratio': 'trim_right_ratio=1.0', 'codebook_size': 'codebook_size=1024', 'codebook_dim': 'codebook_dim=None', 'use_conv_shortcut': 'use_conv_shortcut=True'
+}, model_name='EncodecModel', library='transformers', import_path='transformers.models.encodec'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'task_type_vocab_size': 'task_type_vocab_size=3', 'use_task_id': 'use_task_id=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='ErnieModel', library='transformers', import_path='transformers.models.ernie'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 103424', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1024', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 3072', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 18', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_bias': 'use_bias: Optional[bool
+ ] = False', 'head_dim': 'head_dim: Optional[int
+ ] = 128'
+}, model_name='Ernie4_5Model', library='transformers', import_path='transformers.models.ernie4_5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 103424', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 12288', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 28', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 20', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_bias': 'use_bias: Optional[int
+ ] = False', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1536', 'moe_k': 'moe_k: Optional[int
+ ] = 6', 'moe_num_experts': 'moe_num_experts: Optional[int
+ ] = 64', 'moe_num_shared_experts': 'moe_num_shared_experts: Optional[int
+ ] = 2', 'moe_layer_start_index': 'moe_layer_start_index: Optional[int
+ ] = 1', 'moe_layer_end_index': 'moe_layer_end_index: Optional[int
+ ] = -1', 'moe_layer_interval': 'moe_layer_interval: Optional[int
+ ] = 1', 'moe_norm_min': 'moe_norm_min: Optional[int
+ ] = 1e-12', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001'
+}, model_name='Ernie4_5_MoeModel', library='transformers', import_path='transformers.models.ernie4_5_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_start_token_id': 'image_start_token_id=101304', 'image_end_token_id': 'image_end_token_id=101305', 'image_token_id': 'image_token_id=100295', 'video_start_token_id': 'video_start_token_id=101306', 'video_end_token_id': 'video_end_token_id=101307', 'video_token_id': 'video_token_id=103367'
+}, model_name='Ernie4_5_VL_MoeModel', library='transformers', import_path='transformers.models.ernie4_5_vl_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=None', 'mask_token_id': 'mask_token_id=None', 'pad_token_id': 'pad_token_id=None', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=1026', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'position_embedding_type': "position_embedding_type='absolute'", 'emb_layer_norm_before': 'emb_layer_norm_before=None', 'token_dropout': 'token_dropout=False', 'is_folding_model': 'is_folding_model=False', 'esmfold_config': 'esmfold_config=None', 'vocab_list': 'vocab_list=None'
+}, model_name='EsmModel', library='transformers', import_path='transformers.models.esm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'unk_token': "unk_token=''", 'cls_token': "cls_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'eos_token': "eos_token=''"
+}, model_name='EsmTokenizer', library='transformers', import_path='transformers.models.esm'), ModelAttributes(model=, model_type='model', model_parameters={'protein_encoder_config': 'protein_encoder_config: Optional[dict
+ ] = None', 'vocab_size': 'vocab_size: Optional[int
+ ] = 128256', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'aligner_ffn_mult': 'aligner_ffn_mult: Optional[int
+ ] = 4', 'aligner_enable_bias': 'aligner_enable_bias: Optional[bool
+ ] = True', 'aligner_attention_probs_dropout_prob': 'aligner_attention_probs_dropout_prob: Optional[float
+ ] = 0.1', 'aligner_num_add_layers': 'aligner_num_add_layers: Optional[int
+ ] = 8', 'resampler_depth': 'resampler_depth: Optional[int
+ ] = 6', 'resampler_dim_head': 'resampler_dim_head: Optional[int
+ ] = 64', 'resampler_heads': 'resampler_heads: Optional[int
+ ] = 8', 'resampler_num_latents': 'resampler_num_latents: Optional[int
+ ] = 64', 'resampler_ff_mult': 'resampler_ff_mult: Optional[int
+ ] = 4', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 128000', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 128009', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False'
+}, model_name='EvollaModel', library='transformers', import_path='transformers.models.evolla'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 102400', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 16384', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'sliding_window_pattern': 'sliding_window_pattern: Optional[int
+ ] = 4', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Exaone4Model', library='transformers', import_path='transformers.models.exaone4'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 65024', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4544', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 71', 'num_ln_in_parallel_attn': 'num_ln_in_parallel_attn: Optional[int
+ ] = None', 'layer_norm_epsilon': 'layer_norm_epsilon: Optional[int
+ ] = 1e-05', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_kv_heads': 'num_kv_heads: Optional[int
+ ] = None', 'alibi': 'alibi: Optional[bool
+ ] = False', 'new_decoder_architecture': 'new_decoder_architecture: Optional[bool
+ ] = False', 'multi_query': 'multi_query: Optional[bool
+ ] = True', 'parallel_attn': 'parallel_attn: Optional[bool
+ ] = True', 'bias': 'bias: Optional[bool
+ ] = False', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 11', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 11', 'ffn_hidden_size': 'ffn_hidden_size: Optional[int
+ ] = None', 'activation': "activation: Optional[str] = 'gelu'"
+}, model_name='FalconModel', library='transformers', import_path='transformers.models.falcon'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 128000', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'num_logits_to_keep': 'num_logits_to_keep: Optional[int
+ ] = 1', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mamba_d_ssm': 'mamba_d_ssm: Optional[int
+ ] = 1024', 'mamba_n_heads': 'mamba_n_heads: Optional[int
+ ] = 128', 'mamba_d_head': "mamba_d_head: Optional[str] = 'auto'", 'mamba_n_groups': 'mamba_n_groups: Optional[int
+ ] = 1', 'mamba_d_state': 'mamba_d_state: Optional[int
+ ] = 256', 'mamba_d_conv': 'mamba_d_conv: Optional[int
+ ] = 4', 'mamba_expand': 'mamba_expand: Optional[int
+ ] = 2', 'mamba_chunk_size': 'mamba_chunk_size: Optional[int
+ ] = 256', 'mamba_conv_bias': 'mamba_conv_bias: Optional[bool
+ ] = True', 'mamba_proj_bias': 'mamba_proj_bias: Optional[bool
+ ] = False', 'mamba_norm_before_gate': 'mamba_norm_before_gate: Optional[bool
+ ] = True', 'mamba_rms_norm': 'mamba_rms_norm: Optional[bool
+ ] = False', 'projectors_bias': 'projectors_bias: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'lm_head_multiplier': 'lm_head_multiplier: Optional[float
+ ] = 1.0', 'embedding_multiplier': 'embedding_multiplier: Optional[float
+ ] = 1.0', 'mlp_multipliers': 'mlp_multipliers: Optional[int
+ ] = None', 'key_multiplier': 'key_multiplier: Optional[int
+ ] = None', 'attention_out_multiplier': 'attention_out_multiplier: Optional[int
+ ] = None', 'attention_in_multiplier': 'attention_in_multiplier: Optional[int
+ ] = None', 'ssm_multipliers': 'ssm_multipliers: Optional[int
+ ] = None', 'ssm_in_multiplier': 'ssm_in_multiplier: Optional[int
+ ] = None', 'ssm_out_multiplier': 'ssm_out_multiplier: Optional[int
+ ] = None'
+}, model_name='FalconH1Model', library='transformers', import_path='transformers.models.falcon_h1'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50280', 'hidden_size': 'hidden_size=768', 'state_size': 'state_size=16', 'num_hidden_layers': 'num_hidden_layers=32', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=0', 'expand': 'expand=2', 'conv_kernel': 'conv_kernel=4', 'use_bias': 'use_bias=False', 'use_conv_bias': 'use_conv_bias=True', 'hidden_act': "hidden_act='silu'", 'initializer_range': 'initializer_range=0.1', 'residual_in_fp32': 'residual_in_fp32=True', 'time_step_rank': "time_step_rank='auto'", 'time_step_scale': 'time_step_scale=1.0', 'time_step_min': 'time_step_min=0.001', 'time_step_max': 'time_step_max=0.1', 'time_step_init_scheme': "time_step_init_scheme='random'", 'time_step_floor': 'time_step_floor=0.0001', 'rescale_prenorm_residual': 'rescale_prenorm_residual=False', 'use_falcon_mambapy': 'use_falcon_mambapy=False', 'mixer_rms_eps': 'mixer_rms_eps=1e-06'
+}, model_name='FalconMambaModel', library='transformers', import_path='transformers.models.falcon_mamba'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_id': 'image_token_id=151646', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_select_strategy': "vision_feature_select_strategy='full'", 'vision_feature_layer': 'vision_feature_layer=-1', 'multimodal_projector_bias': 'multimodal_projector_bias=True'
+}, model_name='FastVlmModel', library='transformers', import_path='transformers.models.fast_vlm'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=384', 'vocab_size': 'vocab_size=78', 'num_mel_bins': 'num_mel_bins=80', 'encoder_num_attention_heads': 'encoder_num_attention_heads=2', 'encoder_layers': 'encoder_layers=4', 'encoder_linear_units': 'encoder_linear_units=1536', 'decoder_layers': 'decoder_layers=4', 'decoder_num_attention_heads': 'decoder_num_attention_heads=2', 'decoder_linear_units': 'decoder_linear_units=1536', 'speech_decoder_postnet_layers': 'speech_decoder_postnet_layers=5', 'speech_decoder_postnet_units': 'speech_decoder_postnet_units=256', 'speech_decoder_postnet_kernel': 'speech_decoder_postnet_kernel=5', 'positionwise_conv_kernel_size': 'positionwise_conv_kernel_size=3', 'encoder_normalize_before': 'encoder_normalize_before=False', 'decoder_normalize_before': 'decoder_normalize_before=False', 'encoder_concat_after': 'encoder_concat_after=False', 'decoder_concat_after': 'decoder_concat_after=False', 'reduction_factor': 'reduction_factor=1', 'speaking_speed': 'speaking_speed=1.0', 'use_macaron_style_in_conformer': 'use_macaron_style_in_conformer=True', 'use_cnn_in_conformer': 'use_cnn_in_conformer=True', 'encoder_kernel_size': 'encoder_kernel_size=7', 'decoder_kernel_size': 'decoder_kernel_size=31', 'duration_predictor_layers': 'duration_predictor_layers=2', 'duration_predictor_channels': 'duration_predictor_channels=256', 'duration_predictor_kernel_size': 'duration_predictor_kernel_size=3', 'energy_predictor_layers': 'energy_predictor_layers=2', 'energy_predictor_channels': 'energy_predictor_channels=256', 'energy_predictor_kernel_size': 'energy_predictor_kernel_size=3', 'energy_predictor_dropout': 'energy_predictor_dropout=0.5', 'energy_embed_kernel_size': 'energy_embed_kernel_size=1', 'energy_embed_dropout': 'energy_embed_dropout=0.0', 'stop_gradient_from_energy_predictor': 
'stop_gradient_from_energy_predictor=False', 'pitch_predictor_layers': 'pitch_predictor_layers=5', 'pitch_predictor_channels': 'pitch_predictor_channels=256', 'pitch_predictor_kernel_size': 'pitch_predictor_kernel_size=5', 'pitch_predictor_dropout': 'pitch_predictor_dropout=0.5', 'pitch_embed_kernel_size': 'pitch_embed_kernel_size=1', 'pitch_embed_dropout': 'pitch_embed_dropout=0.0', 'stop_gradient_from_pitch_predictor': 'stop_gradient_from_pitch_predictor=True', 'encoder_dropout_rate': 'encoder_dropout_rate=0.2', 'encoder_positional_dropout_rate': 'encoder_positional_dropout_rate=0.2', 'encoder_attention_dropout_rate': 'encoder_attention_dropout_rate=0.2', 'decoder_dropout_rate': 'decoder_dropout_rate=0.2', 'decoder_positional_dropout_rate': 'decoder_positional_dropout_rate=0.2', 'decoder_attention_dropout_rate': 'decoder_attention_dropout_rate=0.2', 'duration_predictor_dropout_rate': 'duration_predictor_dropout_rate=0.2', 'speech_decoder_postnet_dropout': 'speech_decoder_postnet_dropout=0.5', 'max_source_positions': 'max_source_positions=5000', 'use_masking': 'use_masking=True', 'use_weighted_masking': 'use_weighted_masking=False', 'num_speakers': 'num_speakers=None', 'num_languages': 'num_languages=None', 'speaker_embed_dim': 'speaker_embed_dim=None', 'is_encoder_decoder': 'is_encoder_decoder=True', 'convolution_bias': 'convolution_bias=True'
+}, model_name='FastSpeech2ConformerModel', library='transformers', import_path='transformers.models.fastspeech2_conformer'), ModelAttributes(model=, model_type='model', model_parameters={'model_config': 'model_config: Optional[dict
+ ] = None', 'vocoder_config': 'vocoder_config: Optional[dict
+ ] = None'
+}, model_name='FastSpeech2ConformerWithHifiGan', library='transformers', import_path='transformers.models.fastspeech2_conformer'), ModelAttributes(model=, model_type='model', model_parameters={'pre_norm': 'pre_norm=False', 'layerdrop': 'layerdrop=0.0', 'vocab_size': 'vocab_size=30145', 'emb_dim': 'emb_dim=2048', 'n_layers': 'n_layers=12', 'n_heads': 'n_heads=16', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'gelu_activation': 'gelu_activation=True', 'sinusoidal_embeddings': 'sinusoidal_embeddings=False', 'causal': 'causal=False', 'asm': 'asm=False', 'n_langs': 'n_langs=1', 'use_lang_emb': 'use_lang_emb=True', 'max_position_embeddings': 'max_position_embeddings=512', 'embed_init_std': 'embed_init_std=0.02209708691207961', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'init_std': 'init_std=0.02', 'bos_index': 'bos_index=0', 'eos_index': 'eos_index=1', 'pad_index': 'pad_index=2', 'unk_index': 'unk_index=3', 'mask_index': 'mask_index=5', 'is_encoder': 'is_encoder=True', 'summary_type': "summary_type='first'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': 'summary_activation=None', 'summary_proj_to_labels': 'summary_proj_to_labels=True', 'summary_first_dropout': 'summary_first_dropout=0.1', 'start_n_top': 'start_n_top=5', 'end_n_top': 'end_n_top=5', 'mask_token_id': 'mask_token_id=0', 'lang_id': 'lang_id=0', 'pad_token_id': 'pad_token_id=2', 'bos_token_id': 'bos_token_id=0'
+}, model_name='FlaubertModel', library='transformers', import_path='transformers.models.flaubert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'merges_file': 'merges_file', 'do_lowercase': 'do_lowercase=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'sep_token': "sep_token=''", 'pad_token': "pad_token=''", 'cls_token': "cls_token=''", 'mask_token': "mask_token=''", 'additional_special_tokens': "additional_special_tokens=['', '', '', '', '', '', '', '', '', '']", 'lang2id': 'lang2id=None', 'id2lang': 'id2lang=None'
+}, model_name='FlaubertTokenizer', library='transformers', import_path='transformers.models.flaubert'), ModelAttributes(model=, model_type='model', model_parameters={'image_config': 'image_config: Optional[dict[str, Any
+ ]
+ ] = None', 'text_config': 'text_config: Optional[dict[str, Any
+ ]
+ ] = None', 'multimodal_config': 'multimodal_config: Optional[dict[str, Any
+ ]
+ ] = None', 'image_codebook_config': 'image_codebook_config: Optional[dict[str, Any
+ ]
+ ] = None', 'hidden_size': 'hidden_size: int = 768', 'layer_norm_eps': 'layer_norm_eps: float = 1e-12', 'projection_dim': 'projection_dim: int = 768', 'init_codebook': 'init_codebook: bool = True', 'logit_scale_init_value': 'logit_scale_init_value: float = 2.6592', 'initializer_range': 'initializer_range: float = 0.02', 'ce_ignore_index': 'ce_ignore_index: int = -100', 'mim_weight': 'mim_weight: float = 1.0', 'mlm_weight': 'mlm_weight: float = 1.0', 'global_contrastive_weight': 'global_contrastive_weight: float = 1.0', 'itm_weight': 'itm_weight: float = 1.0', 'mmm_image_weight': 'mmm_image_weight: float = 1.0', 'mmm_text_weight': 'mmm_text_weight: float = 1.0', 'global_backprop_contrastive': 'global_backprop_contrastive: bool = True', 'skip_unmasked_multimodal_encoder': 'skip_unmasked_multimodal_encoder: bool = True', 'return_loss': 'return_loss: bool = True'
+}, model_name='FlavaModel', library='transformers', import_path='transformers.models.flava'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 100352', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 100277', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 100257', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 5', 'num_experts': 'num_experts: Optional[int
+ ] = 7', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.01', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False'
+}, model_name='FlexOlmoModel', library='transformers', import_path='transformers.models.flex_olmo'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=51289', 'is_encoder_decoder': 'is_encoder_decoder=True'
+}, model_name='Florence2Model', library='transformers', import_path='transformers.models.florence2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32000', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu_new'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=4', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'use_tpu_fourier_optimizations': 'use_tpu_fourier_optimizations=False', 'tpu_short_seq_length': 'tpu_short_seq_length=512', 'pad_token_id': 'pad_token_id=3', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2'
+}, model_name='FNetModel', library='transformers', import_path='transformers.models.fnet'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = True', 'keep_accents': 'keep_accents: bool = False', 'bos_token': "bos_token: str = '[CLS]'", 'eos_token': "eos_token: str = '[SEP]'", 'unk_token': "unk_token: str = ''", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'add_prefix_space': 'add_prefix_space: bool = True', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='FNetTokenizer', library='transformers', import_path='transformers.models.fnet'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=96', 'use_conv_embed': 'use_conv_embed=False', 'hidden_sizes': 'hidden_sizes=[
+ 192,
+ 384,
+ 768,
+ 768
+ ]', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'focal_levels': 'focal_levels=[
+ 2,
+ 2,
+ 2,
+ 2
+ ]', 'focal_windows': 'focal_windows=[
+ 3,
+ 3,
+ 3,
+ 3
+ ]', 'hidden_act': "hidden_act='gelu'", 'mlp_ratio': 'mlp_ratio=4.0', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'use_layerscale': 'use_layerscale=False', 'layerscale_value': 'layerscale_value=0.0001', 'use_post_layernorm': 'use_post_layernorm=False', 'use_post_layernorm_in_modulation': 'use_post_layernorm_in_modulation=False', 'normalize_modulator': 'normalize_modulator=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'encoder_stride': 'encoder_stride=32', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='FocalNetModel', library='transformers', import_path='transformers.models.focalnet'), ModelAttributes(model=, model_type='model', model_parameters={'langs': "langs=['en', 'de']", 'src_vocab_size': 'src_vocab_size=42024', 'tgt_vocab_size': 'tgt_vocab_size=42024', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=1024', 'max_length': 'max_length=200', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_layers': 'encoder_layers=12', 'encoder_attention_heads': 'encoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_layers': 'decoder_layers=12', 'decoder_attention_heads': 'decoder_attention_heads=16', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'attention_dropout': 'attention_dropout=0.0', 'dropout': 'dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=2', 'is_encoder_decoder': 'is_encoder_decoder=True', 'scale_embedding': 'scale_embedding=True', 'tie_word_embeddings': 'tie_word_embeddings=False', 'num_beams': 'num_beams=5', 'length_penalty': 'length_penalty=1.0', 'early_stopping': 'early_stopping=False', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'forced_eos_token_id': 'forced_eos_token_id=2', 'common_kwargs': '**common_kwargs'
+}, model_name='FSMTModel', library='transformers', import_path='transformers.models.fsmt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'langs': 'langs=None', 'src_vocab_file': 'src_vocab_file=None', 'tgt_vocab_file': 'tgt_vocab_file=None', 'merges_file': 'merges_file=None', 'do_lower_case': 'do_lower_case=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'sep_token': "sep_token=''", 'pad_token': "pad_token=''"
+}, model_name='FSMTTokenizer', library='transformers', import_path='transformers.models.fsmt'), ModelAttributes(model=, model_type='model', model_parameters=None, model_name='FunnelModel', library='transformers', import_path='transformers.models.funnel'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = True', 'unk_token': "unk_token: str = ''", 'sep_token': "sep_token: str = ''", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = ''", 'mask_token': "mask_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'clean_text': 'clean_text: bool = True', 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None', 'wordpieces_prefix': "wordpieces_prefix: str = '##'"
+}, model_name='FunnelTokenizer', library='transformers', import_path='transformers.models.funnel'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 262144', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 16384', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 36', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'hidden_act': "hidden_act: Optional[str] = 'relu2'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 16384', 'image_size': 'image_size: Optional[int
+ ] = 300', 'patch_size': 'patch_size: Optional[int
+ ] = 30', 'num_channels': 'num_channels: Optional[int
+ ] = 3', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'qk_layernorm': 'qk_layernorm: Optional[bool
+ ] = True', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'image_token_id': 'image_token_id: Optional[int
+ ] = 71011', 'text_config': 'text_config: Optional[dict
+ ] = None'
+}, model_name='FuyuModel', library='transformers', import_path='transformers.models.fuyu'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 3072', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 24576', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 28', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 16', 'head_dim': 'head_dim: Optional[int
+ ] = 256', 'hidden_act': "hidden_act: Optional[str] = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'use_bidirectional_attention': 'use_bidirectional_attention: Optional[bool
+ ] = None'
+}, model_name='GemmaModel', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2304', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 9216', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 26', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 8', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'head_dim': 'head_dim: Optional[int
+ ] = 256', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'query_pre_attn_scalar': 'query_pre_attn_scalar: Optional[int
+ ] = 256', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'final_logit_softcapping': 'final_logit_softcapping: Optional[float
+ ] = 30.0', 'attn_logit_softcapping': 'attn_logit_softcapping: Optional[float
+ ] = 50.0', 'use_bidirectional_attention': 'use_bidirectional_attention: Optional[bool
+ ] = None'
+}, model_name='Gemma2Model', library='transformers', import_path='transformers.models.gemma2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config: Union[transformers.models.gemma3.configuration_gemma3.Gemma3TextConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'vision_config': 'vision_config: Union[transformers.models.siglip.configuration_siglip.SiglipVisionConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'mm_tokens_per_image': 'mm_tokens_per_image: int = 256', 'boi_token_index': 'boi_token_index: int = 255999', 'eoi_token_index': 'eoi_token_index: int = 256000', 'image_token_index': 'image_token_index: int = 262144', 'initializer_range': 'initializer_range: float = 0.02'
+}, model_name='Gemma3Model', library='transformers', import_path='transformers.models.gemma3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 262208', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2304', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 9216', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 26', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 8', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'head_dim': 'head_dim: Optional[int
+ ] = 256', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'query_pre_attn_scalar': 'query_pre_attn_scalar: Optional[int
+ ] = 256', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'final_logit_softcapping': 'final_logit_softcapping: Optional[float
+ ] = None', 'attn_logit_softcapping': 'attn_logit_softcapping: Optional[float
+ ] = None', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_bidirectional_attention': 'use_bidirectional_attention: Optional[bool
+ ] = False'
+}, model_name='Gemma3TextModel', library='transformers', import_path='transformers.models.gemma3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config: Union[transformers.models.gemma3n.configuration_gemma3n.Gemma3nTextConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'vision_config': 'vision_config: Union[transformers.models.gemma3n.configuration_gemma3n.Gemma3nVisionConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'audio_config': 'audio_config: Union[transformers.models.gemma3n.configuration_gemma3n.Gemma3nAudioConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'audio_soft_tokens_per_image': 'audio_soft_tokens_per_image: int = 188', 'vision_soft_tokens_per_image': 'vision_soft_tokens_per_image: int = 256', 'boi_token_id': 'boi_token_id: int = 255999', 'eoi_token_id': 'eoi_token_id: int = 262144', 'image_token_id': 'image_token_id: int = 262145', 'boa_token_id': 'boa_token_id: int = 256000', 'eoa_token_id': 'eoa_token_id: int = 262272', 'audio_token_id': 'audio_token_id: int = 262273', 'initializer_range': 'initializer_range: float = 0.02'
+}, model_name='Gemma3nModel', library='transformers', import_path='transformers.models.gemma3n'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 128', 'vocab_offset': 'vocab_offset: int = 262272', 'input_feat_size': 'input_feat_size: int = 128', 'hidden_size': 'hidden_size: int = 1536', 'rms_norm_eps': 'rms_norm_eps: float = 1e-06', 'gradient_clipping': 'gradient_clipping: float = 10000000000.0', 'conf_attention_chunk_size': 'conf_attention_chunk_size: int = 12', 'conf_attention_context_left': 'conf_attention_context_left: int = 13', 'conf_attention_context_right': 'conf_attention_context_right: int = 0', 'conf_attention_logit_cap': 'conf_attention_logit_cap: float = 50.0', 'conf_num_attention_heads': 'conf_num_attention_heads: int = 8', 'conf_num_hidden_layers': 'conf_num_hidden_layers: int = 12', 'conf_conv_kernel_size': 'conf_conv_kernel_size: int = 5', 'conf_reduction_factor': 'conf_reduction_factor: int = 4', 'conf_residual_weight': 'conf_residual_weight: float = 0.5', 'sscp_conv_channel_size': 'sscp_conv_channel_size: tuple[int, int
+ ] = (128,
+ 32)', 'sscp_conv_group_norm_eps': 'sscp_conv_group_norm_eps: float = 0.001', 'sscp_conv_kernel_size': 'sscp_conv_kernel_size: tuple[tuple[int, int
+ ], tuple[int, int
+ ]
+ ] = ((3,
+ 3), (3,
+ 3))', 'sscp_conv_stride_size': 'sscp_conv_stride_size: tuple[tuple[int, int
+ ], tuple[int, int
+ ]
+ ] = ((2,
+ 2), (2,
+ 2))'
+}, model_name='Gemma3nAudioEncoder', library='transformers', import_path='transformers.models.gemma3n'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 262400', 'vocab_size_per_layer_input': 'vocab_size_per_layer_input: int = 262144', 'hidden_size': 'hidden_size: int = 2048', 'hidden_size_per_layer_input': 'hidden_size_per_layer_input: int = 256', 'intermediate_size': 'intermediate_size: Union[int, collections.abc.Sequence[int
+ ]
+ ] = 16384', 'num_hidden_layers': 'num_hidden_layers: int = 35', 'num_attention_heads': 'num_attention_heads: int = 8', 'num_key_value_heads': 'num_key_value_heads: int = 2', 'head_dim': 'head_dim: int = 256', 'hidden_activation': "hidden_activation: str = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: int = 32768', 'initializer_range': 'initializer_range: float = 0.02', 'rms_norm_eps': 'rms_norm_eps: float = 1e-06', 'pad_token_id': 'pad_token_id: int = 0', 'eos_token_id': 'eos_token_id: int = 1', 'bos_token_id': 'bos_token_id: int = 2', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: bool = False', 'attention_dropout': 'attention_dropout: float = 0.0', 'sliding_window': 'sliding_window: int = 512', 'layer_types': 'layer_types: Optional[collections.abc.Sequence[str
+ ]
+ ] = None', 'final_logit_softcapping': 'final_logit_softcapping: float = 30.0', 'altup_active_idx': 'altup_active_idx: int = 0', 'altup_coef_clip': 'altup_coef_clip: float = 120.0', 'altup_correct_scale': 'altup_correct_scale: bool = True', 'altup_num_inputs': 'altup_num_inputs: int = 4', 'num_kv_shared_layers': 'num_kv_shared_layers: int = 15', 'laurel_rank': 'laurel_rank: int = 64', 'activation_sparsity_pattern': 'activation_sparsity_pattern: Union[float, collections.abc.Sequence[float
+ ], NoneType
+ ] = None'
+}, model_name='Gemma3nTextModel', library='transformers', import_path='transformers.models.gemma3n'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'_resnet_': [''
+ ]
+}, model_name='TimmWrapperModel', library='transformers', import_path='transformers.models.timm_wrapper'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=6', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=1024', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'tie_word_embeddings': 'tie_word_embeddings=False', 'bos_token_id': 'bos_token_id=101', 'eos_token_id': 'eos_token_id=102', 'num_image_with_embedding': 'num_image_with_embedding=None'
+}, model_name='GitModel', library='transformers', import_path='transformers.models.git'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151552', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 13696', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 40', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1.5625e-07', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 151329', 'eos_token_id': 'eos_token_id: Optional[list[int
+ ]
+ ] = [
+ 151329,
+ 151336,
+ 151338
+ ]', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True'
+}, model_name='GlmModel', library='transformers', import_path='transformers.models.glm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151552', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 13696', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 40', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1.5625e-07', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 151329', 'eos_token_id': 'eos_token_id: Optional[list[int
+ ]
+ ] = [
+ 151329,
+ 151336,
+ 151338
+ ]', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True'
+}, model_name='Glm4Model', library='transformers', import_path='transformers.models.glm4'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151343', 'video_token_id': 'video_token_id=151344', 'image_start_token_id': 'image_start_token_id=151339', 'image_end_token_id': 'image_end_token_id=151340', 'video_start_token_id': 'video_start_token_id=151361', 'video_end_token_id': 'video_end_token_id=151362'
+}, model_name='Glm46VModel', library='transformers', import_path='transformers.models.glm46v'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151552', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 10944', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 46', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 96', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1408', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 8', 'n_shared_experts': 'n_shared_experts: Optional[int
+ ] = 1', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = 128', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 1.0', 'n_group': 'n_group: Optional[int
+ ] = 1', 'topk_group': 'topk_group: Optional[int
+ ] = 1', 'first_k_dense_replace': 'first_k_dense_replace: Optional[int
+ ] = 1', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = True', 'use_qk_norm': 'use_qk_norm: Optional[bool
+ ] = False'
+}, model_name='Glm4MoeModel', library='transformers', import_path='transformers.models.glm4_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151343', 'video_token_id': 'video_token_id=151344', 'image_start_token_id': 'image_start_token_id=151339', 'image_end_token_id': 'image_end_token_id=151340', 'video_start_token_id': 'video_start_token_id=151341', 'video_end_token_id': 'video_end_token_id=151342'
+}, model_name='Glm4vModel', library='transformers', import_path='transformers.models.glm4v'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151363', 'video_token_id': 'video_token_id=151364', 'image_start_token_id': 'image_start_token_id=151339', 'image_end_token_id': 'image_end_token_id=151340', 'video_start_token_id': 'video_start_token_id=151341', 'video_end_token_id': 'video_end_token_id=151342'
+}, model_name='Glm4vMoeModel', library='transformers', import_path='transformers.models.glm4v_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151424', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 10944', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 46', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 96', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 65536', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1408', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 8', 'n_shared_experts': 'n_shared_experts: Optional[int
+ ] = 1', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = 128', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 1.0', 'n_group': 'n_group: Optional[int
+ ] = 1', 'topk_group': 'topk_group: Optional[int
+ ] = 1', 'first_k_dense_replace': 'first_k_dense_replace: Optional[int
+ ] = 1', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = True', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.0001'
+}, model_name='Glm4vMoeTextModel', library='transformers', import_path='transformers.models.glm4v_moe'), ModelAttributes(model=, model_type='model', model_parameters={'depth': 'depth=24', 'hidden_size': 'hidden_size=1536', 'hidden_act': "hidden_act='silu'", 'attention_bias': 'attention_bias=False', 'attention_dropout': 'attention_dropout=0.0', 'num_heads': 'num_heads=12', 'in_channels': 'in_channels=3', 'image_size': 'image_size=336', 'patch_size': 'patch_size=14', 'rms_norm_eps': 'rms_norm_eps=1e-05', 'spatial_merge_size': 'spatial_merge_size=2', 'temporal_patch_size': 'temporal_patch_size=2', 'out_hidden_size': 'out_hidden_size=4096', 'intermediate_size': 'intermediate_size=13696', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Glm4vMoeVisionModel', library='transformers', import_path='transformers.models.glm4v_moe'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151552', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 13696', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 40', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='Glm4vTextModel', library='transformers', import_path='transformers.models.glm4v'), ModelAttributes(model=, model_type='model', model_parameters={'depth': 'depth=24', 'hidden_size': 'hidden_size=1536', 'hidden_act': "hidden_act='silu'", 'attention_bias': 'attention_bias=False', 'attention_dropout': 'attention_dropout=0.0', 'num_heads': 'num_heads=12', 'in_channels': 'in_channels=3', 'image_size': 'image_size=336', 'patch_size': 'patch_size=14', 'rms_norm_eps': 'rms_norm_eps=1e-05', 'spatial_merge_size': 'spatial_merge_size=2', 'temporal_patch_size': 'temporal_patch_size=2', 'out_hidden_size': 'out_hidden_size=4096', 'intermediate_size': 'intermediate_size=13696', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Glm4vVisionModel', library='transformers', import_path='transformers.models.glm4v'), ModelAttributes(model=, model_type='model', model_parameters={'audio_config': 'audio_config=None', 'text_config': 'text_config=None', 'audio_token_id': 'audio_token_id=59260', 'projector_hidden_act': "projector_hidden_act='gelu'"
+}, model_name='GlmAsrForConditionalGeneration', library='transformers', import_path='transformers.models.glmasr'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1280', 'intermediate_size': 'intermediate_size=5120', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=20', 'num_key_value_heads': 'num_key_value_heads=None', 'hidden_act': "hidden_act='gelu'", 'max_position_embeddings': 'max_position_embeddings=1500', 'initializer_range': 'initializer_range=0.02', 'rope_parameters': 'rope_parameters=None', 'attention_dropout': 'attention_dropout=0.0', 'num_mel_bins': 'num_mel_bins=128'
+}, model_name='GlmAsrEncoder', library='transformers', import_path='transformers.models.glmasr'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'num_encoder_blocks': 'num_encoder_blocks=4', 'depths': 'depths=[
+ 2,
+ 2,
+ 2,
+ 2
+ ]', 'sr_ratios': 'sr_ratios=[
+ 8,
+ 4,
+ 2,
+ 1
+ ]', 'hidden_sizes': 'hidden_sizes=[
+ 32,
+ 64,
+ 160,
+ 256
+ ]', 'patch_sizes': 'patch_sizes=[
+ 7,
+ 3,
+ 3,
+ 3
+ ]', 'strides': 'strides=[
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'num_attention_heads': 'num_attention_heads=[
+ 1,
+ 2,
+ 5,
+ 8
+ ]', 'mlp_ratios': 'mlp_ratios=[
+ 4,
+ 4,
+ 4,
+ 4
+ ]', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'drop_path_rate': 'drop_path_rate=0.1', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'decoder_hidden_size': 'decoder_hidden_size=64', 'max_depth': 'max_depth=10', 'head_in_index': 'head_in_index=-1'
+}, model_name='GLPNModel', library='transformers', import_path='transformers.models.glpn'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config: Optional[dict
+ ] = None', 'text_config': 'text_config: Optional[dict
+ ] = None', 'image_token_index': 'image_token_index: Optional[int
+ ] = 151859', 'image_seq_length': 'image_seq_length: Optional[int
+ ] = 576', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = -1'
+}, model_name='GotOcr2Model', library='transformers', import_path='transformers.models.got_ocr2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50257', 'n_positions': 'n_positions=1024', 'n_embd': 'n_embd=768', 'n_layer': 'n_layer=12', 'n_head': 'n_head=12', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='gelu_new'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'summary_type': "summary_type='cls_index'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': 'summary_activation=None', 'summary_proj_to_labels': 'summary_proj_to_labels=True', 'summary_first_dropout': 'summary_first_dropout=0.1', 'scale_attn_weights': 'scale_attn_weights=True', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'scale_attn_by_inverse_layer_idx': 'scale_attn_by_inverse_layer_idx=False', 'reorder_and_upcast_attn': 'reorder_and_upcast_attn=False'
+}, model_name='GPT2Model', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50257', 'n_positions': 'n_positions=1024', 'n_embd': 'n_embd=768', 'n_layer': 'n_layer=12', 'n_head': 'n_head=12', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='gelu_new'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'summary_type': "summary_type='cls_index'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': 'summary_activation=None', 'summary_proj_to_labels': 'summary_proj_to_labels=True', 'summary_first_dropout': 'summary_first_dropout=0.1', 'scale_attn_weights': 'scale_attn_weights=True', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'scale_attn_by_inverse_layer_idx': 'scale_attn_by_inverse_layer_idx=False', 'reorder_and_upcast_attn': 'reorder_and_upcast_attn=False'
+}, model_name='GPT2Model', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50257', 'n_positions': 'n_positions=1024', 'n_embd': 'n_embd=768', 'n_layer': 'n_layer=12', 'n_head': 'n_head=12', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='gelu_pytorch_tanh'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'scale_attn_weights': 'scale_attn_weights=True', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'attention_softmax_in_fp32': 'attention_softmax_in_fp32=True', 'scale_attention_softmax_in_fp32': 'scale_attention_softmax_in_fp32=True', 'multi_query': 'multi_query=True'
+}, model_name='GPTBigCodeModel', library='transformers', import_path='transformers.models.gpt_bigcode'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50257', 'max_position_embeddings': 'max_position_embeddings=2048', 'hidden_size': 'hidden_size=2048', 'num_layers': 'num_layers=24', 'attention_types': "attention_types=[[['global', 'local'], 12]]", 'num_heads': 'num_heads=16', 'intermediate_size': 'intermediate_size=None', 'window_size': 'window_size=256', 'activation_function': "activation_function='gelu_new'", 'resid_dropout': 'resid_dropout=0.0', 'embed_dropout': 'embed_dropout=0.0', 'attention_dropout': 'attention_dropout=0.0', 'classifier_dropout': 'classifier_dropout=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256'
+}, model_name='GPTNeoModel', library='transformers', import_path='transformers.models.gpt_neo'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50432', 'hidden_size': 'hidden_size: Optional[int
+ ] = 6144', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 44', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 24576', 'hidden_act': "hidden_act: Optional[str] = 'gelu'", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'classifier_dropout': 'classifier_dropout: Optional[float
+ ] = 0.1', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'use_parallel_residual': 'use_parallel_residual: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True'
+}, model_name='GPTNeoXModel', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'intermediate_multiple_size': 'intermediate_multiple_size: Optional[int
+ ] = 4', 'hidden_act': "hidden_act: Optional[str] = 'gelu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 31996', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 31999', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.1', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0'
+}, model_name='GPTNeoXJapaneseModel', library='transformers', import_path='transformers.models.gpt_neox_japanese'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'emoji_file': 'emoji_file', 'unk_token': "unk_token='<|endoftext|>'", 'pad_token': "pad_token='<|endoftext|>'", 'bos_token': "bos_token='<|startoftext|>'", 'eos_token': "eos_token='<|endoftext|>'", 'do_clean_text': 'do_clean_text=False'
+}, model_name='GPTNeoXJapaneseTokenizer', library='transformers', import_path='transformers.models.gpt_neox_japanese'), ModelAttributes(model=, model_type='model', model_parameters={'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 36', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 128', 'vocab_size': 'vocab_size: Optional[int
+ ] = 201088', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2880', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 2880', 'head_dim': 'head_dim: Optional[int
+ ] = 64', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'sliding_window': 'sliding_window: Optional[int
+ ] = 128', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'rope_parameters': "rope_parameters: Optional[transformers.modeling_rope_utils.RopeParameters] = {'rope_type': 'yarn', 'factor': 32.0, 'beta_fast': 32.0, 'beta_slow': 1.0, 'truncate': False, 'original_max_position_embeddings': 4096}", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 4', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.9', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='GptOssModel', library='transformers', import_path='transformers.models.gpt_oss'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50400', 'n_positions': 'n_positions=2048', 'n_embd': 'n_embd=4096', 'n_layer': 'n_layer=28', 'n_head': 'n_head=16', 'rotary_dim': 'rotary_dim=64', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='gelu_new'", 'resid_pdrop': 'resid_pdrop=0.0', 'embd_pdrop': 'embd_pdrop=0.0', 'attn_pdrop': 'attn_pdrop=0.0', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='GPTJModel', library='transformers', import_path='transformers.models.gptj'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'embedding_multiplier': 'embedding_multiplier: Optional[float
+ ] = 1.0', 'logits_scaling': 'logits_scaling: Optional[float
+ ] = 1.0', 'residual_multiplier': 'residual_multiplier: Optional[float
+ ] = 1.0', 'attention_multiplier': 'attention_multiplier: Optional[float
+ ] = 1.0'
+}, model_name='GraniteModel', library='transformers', import_path='transformers.models.granite'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'embedding_multiplier': 'embedding_multiplier: Optional[float
+ ] = 1.0', 'logits_scaling': 'logits_scaling: Optional[float
+ ] = 1.0', 'residual_multiplier': 'residual_multiplier: Optional[float
+ ] = 1.0', 'attention_multiplier': 'attention_multiplier: Optional[float
+ ] = 1.0', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001'
+}, model_name='GraniteMoeModel', library='transformers', import_path='transformers.models.granitemoe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'embedding_multiplier': 'embedding_multiplier: Optional[float
+ ] = 1.0', 'logits_scaling': 'logits_scaling: Optional[float
+ ] = 1.0', 'residual_multiplier': 'residual_multiplier: Optional[float
+ ] = 1.0', 'attention_multiplier': 'attention_multiplier: Optional[float
+ ] = 1.0', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'shared_intermediate_size': 'shared_intermediate_size: Optional[int
+ ] = 1024', 'position_embedding_type': 'position_embedding_type: Optional[str
+ ] = None', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'mamba_n_heads': 'mamba_n_heads: Optional[int
+ ] = 128', 'mamba_n_groups': 'mamba_n_groups: Optional[int
+ ] = 1', 'mamba_d_state': 'mamba_d_state: Optional[int
+ ] = 256', 'mamba_d_head': "mamba_d_head: Optional[str] = 'auto'", 'mamba_d_conv': 'mamba_d_conv: Optional[int
+ ] = 4', 'mamba_expand': 'mamba_expand: Optional[int
+ ] = 2', 'mamba_chunk_size': 'mamba_chunk_size: Optional[int
+ ] = 256', 'mamba_conv_bias': 'mamba_conv_bias: Optional[bool
+ ] = True', 'mamba_proj_bias': 'mamba_proj_bias: Optional[bool
+ ] = False'
+}, model_name='GraniteMoeHybridModel', library='transformers', import_path='transformers.models.granitemoehybrid'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'embedding_multiplier': 'embedding_multiplier: Optional[float
+ ] = 1.0', 'logits_scaling': 'logits_scaling: Optional[float
+ ] = 1.0', 'residual_multiplier': 'residual_multiplier: Optional[float
+ ] = 1.0', 'attention_multiplier': 'attention_multiplier: Optional[float
+ ] = 1.0', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'shared_intermediate_size': 'shared_intermediate_size: Optional[int
+ ] = 0'
+}, model_name='GraniteMoeSharedModel', library='transformers', import_path='transformers.models.granitemoeshared'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'backbone_kwargs': 'backbone_kwargs=None', 'text_config': 'text_config=None', 'num_queries': 'num_queries=900', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'num_feature_levels': 'num_feature_levels=4', 'encoder_n_points': 'encoder_n_points=4', 'decoder_n_points': 'decoder_n_points=4', 'two_stage': 'two_stage=True', 'class_cost': 'class_cost=1.0', 'bbox_cost': 'bbox_cost=5.0', 'giou_cost': 'giou_cost=2.0', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5.0', 'giou_loss_coefficient': 'giou_loss_coefficient=2.0', 'focal_alpha': 'focal_alpha=0.25', 'disable_custom_kernels': 'disable_custom_kernels=False', 'max_text_len': 'max_text_len=256', 'text_enhancer_dropout': 'text_enhancer_dropout=0.0', 'fusion_droppath': 'fusion_droppath=0.1', 'fusion_dropout': 'fusion_dropout=0.0', 'embedding_init_target': 'embedding_init_target=True', 'query_dim': 'query_dim=4', 'decoder_bbox_embed_share': 'decoder_bbox_embed_share=True', 'two_stage_bbox_embed_share': 'two_stage_bbox_embed_share=False', 'positional_embedding_temperature': 'positional_embedding_temperature=20', 'init_std': 
'init_std=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05'
+}, model_name='GroundingDinoModel', library='transformers', import_path='transformers.models.grounding_dino'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=256', 'projection_intermediate_dim': 'projection_intermediate_dim=4096', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='GroupViTModel', library='transformers', import_path='transformers.models.groupvit'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 48000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 7040', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 20', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 20', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-08', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 3', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False'
+}, model_name='HeliumModel', library='transformers', import_path='transformers.models.helium'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'embedding_size': 'embedding_size=64', 'depths': 'depths=[
+ 3,
+ 4,
+ 6,
+ 3
+ ]', 'hidden_sizes': 'hidden_sizes=[
+ 256,
+ 512,
+ 1024,
+ 2048
+ ]', 'hidden_act': "hidden_act='relu'", 'out_features': 'out_features=None', 'out_indices': 'out_indices=None', 'stem_channels': 'stem_channels=[
+ 3,
+ 32,
+ 48
+ ]', 'stage_in_channels': 'stage_in_channels=[
+ 48,
+ 128,
+ 512,
+ 1024
+ ]', 'stage_mid_channels': 'stage_mid_channels=[
+ 48,
+ 96,
+ 192,
+ 384
+ ]', 'stage_out_channels': 'stage_out_channels=[
+ 128,
+ 512,
+ 1024,
+ 2048
+ ]', 'stage_num_blocks': 'stage_num_blocks=[
+ 1,
+ 1,
+ 3,
+ 1
+ ]', 'stage_downsample': 'stage_downsample=[False, True, True, True
+ ]', 'stage_light_block': 'stage_light_block=[False, False, True, True
+ ]', 'stage_kernel_size': 'stage_kernel_size=[
+ 3,
+ 3,
+ 5,
+ 5
+ ]', 'stage_numb_of_layers': 'stage_numb_of_layers=[
+ 6,
+ 6,
+ 6,
+ 6
+ ]', 'use_learnable_affine_block': 'use_learnable_affine_block=False', 'initializer_range': 'initializer_range=0.02'
+}, model_name='HGNetV2Backbone', library='transformers', import_path='transformers.models.hgnet_v2'), ModelAttributes(model=, model_type='model', model_parameters={'embed_dim': 'embed_dim=96', 'image_size': 'image_size=[
+ 224,
+ 224
+ ]', 'patch_size': 'patch_size=[
+ 7,
+ 7
+ ]', 'patch_stride': 'patch_stride=[
+ 4,
+ 4
+ ]', 'patch_padding': 'patch_padding=[
+ 3,
+ 3
+ ]', 'mlp_ratio': 'mlp_ratio=4.0', 'depths': 'depths=[
+ 2,
+ 3,
+ 16,
+ 3
+ ]', 'num_heads': 'num_heads=[
+ 1,
+ 2,
+ 4,
+ 8
+ ]', 'embed_dim_multiplier': 'embed_dim_multiplier=2.0', 'num_query_pool': 'num_query_pool=3', 'query_stride': 'query_stride=[
+ 2,
+ 2
+ ]', 'masked_unit_size': 'masked_unit_size=[
+ 8,
+ 8
+ ]', 'masked_unit_attention': 'masked_unit_attention=[True, True, False, False
+ ]', 'drop_path_rate': 'drop_path_rate=0.0', 'num_channels': 'num_channels=3', 'hidden_act': "hidden_act='gelu'", 'initializer_range': 'initializer_range=0.02', 'layer_norm_init': 'layer_norm_init=1.0', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'decoder_hidden_size': 'decoder_hidden_size=None', 'decoder_depth': 'decoder_depth=None', 'decoder_num_heads': 'decoder_num_heads=None', 'normalize_pixel_loss': 'normalize_pixel_loss=True', 'mask_ratio': 'mask_ratio=0.6', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='HieraModel', library='transformers', import_path='transformers.models.hiera'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_layer_norm': 'feat_proj_layer_norm=True', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'conv_pos_batch_norm': 'conv_pos_batch_norm=False', 'do_stable_layer_norm': 'do_stable_layer_norm=False', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'ctc_loss_reduction': "ctc_loss_reduction='sum'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2'
+}, model_name='HubertModel', library='transformers', import_path='transformers.models.hubert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'word_delimiter_token': "word_delimiter_token='|'", 'replace_word_delimiter_char': "replace_word_delimiter_char=' '", 'do_lower_case': 'do_lower_case=False', 'target_lang': 'target_lang=None'
+}, model_name='Wav2Vec2CTCTokenizer', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 290943', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'eod_token_id': 'eod_token_id: Optional[int
+ ] = 3', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='HunYuanDenseV1Model', library='transformers', import_path='transformers.models.hunyuan_v1_dense'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 290943', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'eod_token_id': 'eod_token_id: Optional[int
+ ] = 3', 'sep_token_id': 'sep_token_id: Optional[int
+ ] = 4', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts': 'num_experts: Union[int, list
+ ] = 1', 'moe_topk': 'moe_topk: Union[int, list
+ ] = 1', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='HunYuanMoEV1Model', library='transformers', import_path='transformers.models.hunyuan_v1_moe'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'quant_mode': 'quant_mode=False', 'force_dequant': "force_dequant='none'"
+}, model_name='IBertModel', library='transformers', import_path='transformers.models.ibert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32000', 'additional_vocab_size': 'additional_vocab_size=0', 'hidden_size': 'hidden_size=4096', 'intermediate_size': 'intermediate_size=11008', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=32', 'dropout': 'dropout=0.0', 'hidden_act': "hidden_act='silu'", 'initializer_range': 'initializer_range=0.02', 'alpha_initializer': "alpha_initializer='zeros'", 'alphas_initializer_range': 'alphas_initializer_range=0.0', 'alpha_type': "alpha_type='float'", 'rms_norm_eps': 'rms_norm_eps=1e-06', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'tie_word_embeddings': 'tie_word_embeddings=False', 'cross_layer_interval': 'cross_layer_interval=1', 'qk_layer_norms': 'qk_layer_norms=False', 'freeze_text_layers': 'freeze_text_layers=True', 'freeze_text_module_exceptions': 'freeze_text_module_exceptions=[]', 'freeze_lm_head': 'freeze_lm_head=False', 'freeze_vision_layers': 'freeze_vision_layers=True', 'freeze_vision_module_exceptions': 'freeze_vision_module_exceptions=[]', 'use_resampler': 'use_resampler=False', 'vision_config': 'vision_config=None', 'perceiver_config': 'perceiver_config=None'
+}, model_name='IdeficsModel', library='transformers', import_path='transformers.models.idefics'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'image_token_id': 'image_token_id=32001', 'tie_word_embeddings': 'tie_word_embeddings=False', 'vision_config': 'vision_config=None', 'perceiver_config': 'perceiver_config=None', 'text_config': 'text_config=None'
+}, model_name='Idefics2Model', library='transformers', import_path='transformers.models.idefics2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'image_token_id': 'image_token_id=128257', 'tie_word_embeddings': 'tie_word_embeddings=False', 'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'scale_factor': 'scale_factor=2', 'pad_token_id': 'pad_token_id=128002'
+}, model_name='Idefics3Model', library='transformers', import_path='transformers.models.idefics3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1152', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=16', 'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'patch_size': 'patch_size=32', 'hidden_act': "hidden_act='gelu_pytorch_tanh'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Idefics3VisionTransformer', library='transformers', import_path='transformers.models.idefics3'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'pooler_output_size': 'pooler_output_size=None', 'pooler_act': "pooler_act='tanh'"
+}, model_name='IJepaModel', library='transformers', import_path='transformers.models.ijepa'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=513', 'n_positions': 'n_positions=1024', 'n_embd': 'n_embd=512', 'n_layer': 'n_layer=24', 'n_head': 'n_head=8', 'n_inner': 'n_inner=None', 'activation_function': "activation_function='quick_gelu'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'scale_attn_weights': 'scale_attn_weights=True', 'tie_word_embeddings': 'tie_word_embeddings=False', 'scale_attn_by_inverse_layer_idx': 'scale_attn_by_inverse_layer_idx=False', 'reorder_and_upcast_attn': 'reorder_and_upcast_attn=False'
+}, model_name='ImageGPTModel', library='transformers', import_path='transformers.models.imagegpt'), ModelAttributes(model=, model_type='model', model_parameters={'prediction_length': 'prediction_length: Optional[int
+ ] = None', 'context_length': 'context_length: Optional[int
+ ] = None', 'distribution_output': "distribution_output: str = 'student_t'", 'loss': "loss: str = 'nll'", 'input_size': 'input_size: int = 1', 'lags_sequence': 'lags_sequence: Optional[list[int
+ ]
+ ] = None', 'scaling': "scaling: Union[str, bool, NoneType] = 'mean'", 'num_dynamic_real_features': 'num_dynamic_real_features: int = 0', 'num_static_real_features': 'num_static_real_features: int = 0', 'num_static_categorical_features': 'num_static_categorical_features: int = 0', 'num_time_features': 'num_time_features: int = 0', 'cardinality': 'cardinality: Optional[list[int
+ ]
+ ] = None', 'embedding_dimension': 'embedding_dimension: Optional[list[int
+ ]
+ ] = None', 'd_model': 'd_model: int = 64', 'encoder_ffn_dim': 'encoder_ffn_dim: int = 32', 'decoder_ffn_dim': 'decoder_ffn_dim: int = 32', 'encoder_attention_heads': 'encoder_attention_heads: int = 2', 'decoder_attention_heads': 'decoder_attention_heads: int = 2', 'encoder_layers': 'encoder_layers: int = 2', 'decoder_layers': 'decoder_layers: int = 2', 'is_encoder_decoder': 'is_encoder_decoder: bool = True', 'activation_function': "activation_function: str = 'gelu'", 'dropout': 'dropout: float = 0.05', 'encoder_layerdrop': 'encoder_layerdrop: float = 0.1', 'decoder_layerdrop': 'decoder_layerdrop: float = 0.1', 'attention_dropout': 'attention_dropout: float = 0.1', 'activation_dropout': 'activation_dropout: float = 0.1', 'num_parallel_samples': 'num_parallel_samples: int = 100', 'init_std': 'init_std: float = 0.02', 'attention_type': "attention_type: str = 'prob'", 'sampling_factor': 'sampling_factor: int = 5', 'distil': 'distil: bool = True'
+}, model_name='InformerModel', library='transformers', import_path='transformers.models.informer'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'qformer_config': 'qformer_config=None', 'text_config': 'text_config=None', 'num_query_tokens': 'num_query_tokens=32', 'image_token_index': 'image_token_index=None'
+}, model_name='InstructBlipModel', library='transformers', import_path='transformers.models.instructblip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'qformer_config': 'qformer_config=None', 'text_config': 'text_config=None', 'num_query_tokens': 'num_query_tokens=32', 'video_token_index': 'video_token_index=None'
+}, model_name='InstructBlipVideoModel', library='transformers', import_path='transformers.models.instructblipvideo'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_id': 'image_token_id=151667', 'image_seq_length': 'image_seq_length=256', 'downsample_ratio': 'downsample_ratio=0.5', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_layer': 'vision_feature_layer=-1', 'vision_feature_select_strategy': "vision_feature_select_strategy='default'"
+}, model_name='InternVLModel', library='transformers', import_path='transformers.models.internvl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1024', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=16', 'attention_bias': 'attention_bias=False', 'use_qk_norm': 'use_qk_norm=False', 'intermediate_size': 'intermediate_size=4096', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_dropout': 'attention_dropout=0.0', 'projection_dropout': 'projection_dropout=0.0', 'initializer_range': 'initializer_range=0.02', 'norm_type': "norm_type='layer_norm'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=[
+ 448,
+ 448
+ ]', 'patch_size': 'patch_size=[
+ 14,
+ 14
+ ]', 'num_channels': 'num_channels=3', 'use_mask_token': 'use_mask_token=False', 'use_absolute_position_embeddings': 'use_absolute_position_embeddings=True', 'layer_scale_init_value': 'layer_scale_init_value=0.1', 'use_mean_pooling': 'use_mean_pooling=True'
+}, model_name='InternVLVisionModel', library='transformers', import_path='transformers.models.internvl'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 150272', 'hidden_size': 'hidden_size: Optional[int
+ ] = 3328', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 26624', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 26', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'relu2'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 150024', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = True', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='Jais2Model', library='transformers', import_path='transformers.models.jais2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=65536', 'tie_word_embeddings': 'tie_word_embeddings=False', 'hidden_size': 'hidden_size=4096', 'intermediate_size': 'intermediate_size=14336', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=32', 'num_key_value_heads': 'num_key_value_heads=8', 'hidden_act': "hidden_act='silu'", 'initializer_range': 'initializer_range=0.02', 'rms_norm_eps': 'rms_norm_eps=1e-06', 'output_router_logits': 'output_router_logits=False', 'router_aux_loss_coef': 'router_aux_loss_coef=0.001', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'max_position_embeddings': 'max_position_embeddings=262144', 'attention_dropout': 'attention_dropout=0.0', 'num_experts_per_tok': 'num_experts_per_tok=2', 'num_experts': 'num_experts=16', 'expert_layer_period': 'expert_layer_period=2', 'expert_layer_offset': 'expert_layer_offset=1', 'attn_layer_period': 'attn_layer_period=8', 'attn_layer_offset': 'attn_layer_offset=4', 'use_mamba_kernels': 'use_mamba_kernels=True', 'mamba_d_state': 'mamba_d_state=16', 'mamba_d_conv': 'mamba_d_conv=4', 'mamba_expand': 'mamba_expand=2', 'mamba_dt_rank': "mamba_dt_rank='auto'", 'mamba_conv_bias': 'mamba_conv_bias=True', 'mamba_proj_bias': 'mamba_proj_bias=False'
+}, model_name='JambaModel', library='transformers', import_path='transformers.models.jamba'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'vq_config': 'vq_config=None', 'image_token_id': 'image_token_id=100581'
+}, model_name='JanusModel', library='transformers', import_path='transformers.models.janus'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 12', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 16', 'kv_channels': 'kv_channels: Optional[int
+ ] = 128', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 5632', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'activation_function': "activation_function: Optional[str] = 'silu'", 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'aux_loss_coef': 'aux_loss_coef: Optional[float
+ ] = 0.01', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.01', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='JetMoeModel', library='transformers', import_path='transformers.models.jetmoe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'latent_query_num': 'latent_query_num=64'
+}, model_name='Kosmos2Model', library='transformers', import_path='transformers.models.kosmos2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space: bool = True', 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='XLMRobertaTokenizer', library='transformers', import_path='transformers.models.xlm_roberta'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'latent_query_num': 'latent_query_num=2048'
+}, model_name='Kosmos2_5Model', library='transformers', import_path='transformers.models.kosmos2_5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'codebook_vocab_size': 'codebook_vocab_size: Optional[int
+ ] = 2049', 'vocab_size': 'vocab_size: Optional[int
+ ] = 4001', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 48', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 750', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'head_dim': 'head_dim: Optional[int
+ ] = None', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'sliding_window': 'sliding_window: Optional[int
+ ] = 375', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'ffn_dim': 'ffn_dim: Optional[int
+ ] = 11264', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-08', 'num_codebooks': 'num_codebooks: Optional[int
+ ] = 32', 'audio_bos_token_id': 'audio_bos_token_id: Optional[int
+ ] = 2048', 'audio_pad_token_id': 'audio_pad_token_id: Optional[int
+ ] = 69569', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 3', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 48000', 'codec_config': 'codec_config: Optional[dict
+ ] = None'
+}, model_name='KyutaiSpeechToTextModel', library='transformers', import_path='transformers.models.kyutai_speech_to_text'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=512', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=True', 'encoder_config': 'encoder_config: Union[dict, transformers.models.lasr.configuration_lasr.LasrEncoderConfig
+ ] = None', 'pad_token_id': 'pad_token_id=0'
+}, model_name='LasrForCTC', library='transformers', import_path='transformers.models.lasr'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='ParakeetTokenizerFast', library='transformers', import_path='transformers.models.parakeet'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=512', 'num_hidden_layers': 'num_hidden_layers=17', 'num_attention_heads': 'num_attention_heads=8', 'intermediate_size': 'intermediate_size=2048', 'hidden_act': "hidden_act='silu'", 'attention_bias': 'attention_bias=False', 'convolution_bias': 'convolution_bias=False', 'conv_kernel_size': 'conv_kernel_size=32', 'subsampling_conv_channels': 'subsampling_conv_channels=256', 'subsampling_conv_kernel_size': 'subsampling_conv_kernel_size=5', 'subsampling_conv_stride': 'subsampling_conv_stride=2', 'num_mel_bins': 'num_mel_bins=128', 'dropout': 'dropout=0.1', 'dropout_positions': 'dropout_positions=0.0', 'layerdrop': 'layerdrop=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'max_position_embeddings': 'max_position_embeddings=10000', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'feed_forward_residual_weights': 'feed_forward_residual_weights=[
+ 1.5,
+ 0.5
+ ]', 'conv_residual_weights': 'conv_residual_weights=[
+ 2.0,
+ 1.0
+ ]', 'batch_norm_momentum': 'batch_norm_momentum=0.01', 'rope_parameters': 'rope_parameters=None'
+}, model_name='LasrEncoder', library='transformers', import_path='transformers.models.lasr'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='ParakeetTokenizerFast', library='transformers', import_path='transformers.models.parakeet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'max_2d_position_embeddings': 'max_2d_position_embeddings=1024'
+}, model_name='LayoutLMModel', library='transformers', import_path='transformers.models.layoutlm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'max_2d_position_embeddings': 'max_2d_position_embeddings=1024', 'max_rel_pos': 'max_rel_pos=128', 'rel_pos_bins': 'rel_pos_bins=32', 'fast_qkv': 'fast_qkv=True', 'max_rel_2d_pos': 'max_rel_2d_pos=256', 'rel_2d_pos_bins': 'rel_2d_pos_bins=64', 'convert_sync_batchnorm': 'convert_sync_batchnorm=True', 'image_feature_pool_shape': 'image_feature_pool_shape=[
+ 7,
+ 7,
+ 256
+ ]', 'coordinate_size': 'coordinate_size=128', 'shape_size': 'shape_size=128', 'has_relative_attention_bias': 'has_relative_attention_bias=True', 'has_spatial_attention_bias': 'has_spatial_attention_bias=True', 'has_visual_segment_embedding': 'has_visual_segment_embedding=False', 'detectron2_config_args': 'detectron2_config_args=None'
+}, model_name='LayoutLMv2Model', library='transformers', import_path='transformers.models.layoutlmv2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case=True', 'unk_token': "unk_token='[UNK]'", 'sep_token': "sep_token='[SEP]'", 'pad_token': "pad_token='[PAD]'", 'cls_token': "cls_token='[CLS]'", 'mask_token': "mask_token='[MASK]'", 'cls_token_box': 'cls_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'sep_token_box': 'sep_token_box=[
+ 1000,
+ 1000,
+ 1000,
+ 1000
+ ]', 'pad_token_box': 'pad_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'pad_token_label': 'pad_token_label=-100', 'only_label_first_subword': 'only_label_first_subword=True', 'tokenize_chinese_chars': 'tokenize_chinese_chars=True', 'strip_accents': 'strip_accents=None'
+}, model_name='LayoutLMv2Tokenizer', library='transformers', import_path='transformers.models.layoutlmv2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'max_2d_position_embeddings': 'max_2d_position_embeddings=1024', 'coordinate_size': 'coordinate_size=128', 'shape_size': 'shape_size=128', 'has_relative_attention_bias': 'has_relative_attention_bias=True', 'rel_pos_bins': 'rel_pos_bins=32', 'max_rel_pos': 'max_rel_pos=128', 'rel_2d_pos_bins': 'rel_2d_pos_bins=64', 'max_rel_2d_pos': 'max_rel_2d_pos=256', 'has_spatial_attention_bias': 'has_spatial_attention_bias=True', 'text_embed': 'text_embed=True', 'visual_embed': 'visual_embed=True', 'input_size': 'input_size=224', 'num_channels': 'num_channels=3', 'patch_size': 'patch_size=16', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='LayoutLMv3Model', library='transformers', import_path='transformers.models.layoutlmv3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'errors': "errors='replace'", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'add_prefix_space': 'add_prefix_space=True', 'cls_token_box': 'cls_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'sep_token_box': 'sep_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'pad_token_box': 'pad_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'pad_token_label': 'pad_token_label=-100', 'only_label_first_subword': 'only_label_first_subword=True', 'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None'
+}, model_name='LayoutLMv3Tokenizer', library='transformers', import_path='transformers.models.layoutlmv3'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'max_encoder_position_embeddings': 'max_encoder_position_embeddings=16384', 'max_decoder_position_embeddings': 'max_decoder_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=2', 'classifier_dropout': 'classifier_dropout=0.0', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'attention_window': 'attention_window: Union[list[int
+ ], int
+ ] = 512'
+}, model_name='LEDModel', library='transformers', import_path='transformers.models.led'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'num_channels': 'num_channels=3', 'kernel_size': 'kernel_size=3', 'stride': 'stride=2', 'padding': 'padding=1', 'patch_size': 'patch_size=16', 'hidden_sizes': 'hidden_sizes=[
+ 128,
+ 256,
+ 384
+ ]', 'num_attention_heads': 'num_attention_heads=[
+ 4,
+ 8,
+ 12
+ ]', 'depths': 'depths=[
+ 4,
+ 4,
+ 4
+ ]', 'key_dim': 'key_dim=[
+ 16,
+ 16,
+ 16
+ ]', 'drop_path_rate': 'drop_path_rate=0', 'mlp_ratio': 'mlp_ratio=[
+ 2,
+ 2,
+ 2
+ ]', 'attention_ratio': 'attention_ratio=[
+ 2,
+ 2,
+ 2
+ ]', 'initializer_range': 'initializer_range=0.02'
+}, model_name='LevitModel', library='transformers', import_path='transformers.models.levit'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 65536', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 12288', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 128000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'norm_eps': 'norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'conv_bias': 'conv_bias: Optional[bool
+ ] = False', 'conv_L_cache': 'conv_L_cache: Optional[int
+ ] = 3', 'block_multiple_of': 'block_multiple_of: Optional[int
+ ] = 256', 'block_ffn_dim_multiplier': 'block_ffn_dim_multiplier: Optional[float
+ ] = 1.0', 'block_auto_adjust_ff_dim': 'block_auto_adjust_ff_dim: Optional[bool
+ ] = True', 'full_attn_idxs': 'full_attn_idxs: Optional[list[int
+ ]
+ ] = None', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Lfm2Model', library='transformers', import_path='transformers.models.lfm2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 65536', 'hidden_size': 'hidden_size: int = 2048', 'intermediate_size': 'intermediate_size: int = 7168', 'moe_intermediate_size': 'moe_intermediate_size: int = 1792', 'num_hidden_layers': 'num_hidden_layers: int = 32', 'pad_token_id': 'pad_token_id: int = 0', 'bos_token_id': 'bos_token_id: int = 1', 'eos_token_id': 'eos_token_id: int = 2', 'tie_word_embeddings': 'tie_word_embeddings: bool = True', 'rope_parameters': 'rope_parameters: transformers.modeling_rope_utils.RopeParameters = None', 'max_position_embeddings': 'max_position_embeddings: int = 128000', 'initializer_range': 'initializer_range: float = 0.02', 'norm_eps': 'norm_eps: float = 1e-05', 'num_attention_heads': 'num_attention_heads: int = 32', 'num_key_value_heads': 'num_key_value_heads: int = 8', 'conv_bias': 'conv_bias: bool = False', 'conv_L_cache': 'conv_L_cache: int = 3', 'num_dense_layers': 'num_dense_layers: int = 2', 'num_experts_per_tok': 'num_experts_per_tok: int = 4', 'num_experts': 'num_experts: int = 32', 'use_expert_bias': 'use_expert_bias: bool = True', 'routed_scaling_factor': 'routed_scaling_factor: float = 1.0', 'norm_topk_prob': 'norm_topk_prob: bool = True', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Lfm2MoeModel', library='transformers', import_path='transformers.models.lfm2_moe'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_id': 'image_token_id=396', 'projector_hidden_act': "projector_hidden_act='gelu'", 'projector_hidden_size': 'projector_hidden_size=2560', 'projector_bias': 'projector_bias=True', 'projector_use_layernorm': 'projector_use_layernorm=True', 'downsample_factor': 'downsample_factor=2'
+}, model_name='Lfm2VlModel', library='transformers', import_path='transformers.models.lfm2_vl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'keypoint_detector_config': 'keypoint_detector_config: transformers.models.superpoint.configuration_superpoint.SuperPointConfig = None', 'descriptor_dim': 'descriptor_dim: int = 256', 'num_hidden_layers': 'num_hidden_layers: int = 9', 'num_attention_heads': 'num_attention_heads: int = 4', 'num_key_value_heads': 'num_key_value_heads=None', 'depth_confidence': 'depth_confidence: float = 0.95', 'width_confidence': 'width_confidence: float = 0.99', 'filter_threshold': 'filter_threshold: float = 0.1', 'initializer_range': 'initializer_range: float = 0.02', 'hidden_act': "hidden_act: str = 'gelu'", 'attention_dropout': 'attention_dropout=0.0', 'attention_bias': 'attention_bias=True', 'trust_remote_code': 'trust_remote_code: bool = False'
+}, model_name='LightGlueForKeypointMatching', library='transformers', import_path='transformers.models.lightglue'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'classifier_dropout': 'classifier_dropout=None', 'channel_shrink_ratio': 'channel_shrink_ratio=4', 'max_2d_position_embeddings': 'max_2d_position_embeddings=1024'
+}, model_name='LiltModel', library='transformers', import_path='transformers.models.lilt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='LlamaModel', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'boi_token_index': 'boi_token_index=200080', 'eoi_token_index': 'eoi_token_index=200081', 'image_token_index': 'image_token_index=200092', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='Llama4ForConditionalGeneration', library='transformers', import_path='transformers.models.llama4'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=202048', 'hidden_size': 'hidden_size=5120', 'intermediate_size': 'intermediate_size=8192', 'intermediate_size_mlp': 'intermediate_size_mlp=16384', 'num_hidden_layers': 'num_hidden_layers=48', 'num_attention_heads': 'num_attention_heads=40', 'num_key_value_heads': 'num_key_value_heads=8', 'head_dim': 'head_dim=128', 'hidden_act': "hidden_act='silu'", 'max_position_embeddings': 'max_position_embeddings=131072', 'initializer_range': 'initializer_range=0.02', 'rms_norm_eps': 'rms_norm_eps=1e-05', 'pad_token_id': 'pad_token_id=None', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'tie_word_embeddings': 'tie_word_embeddings=False', 'attention_dropout': 'attention_dropout=0.0', 'num_experts_per_tok': 'num_experts_per_tok=1', 'num_local_experts': 'num_local_experts=16', 'moe_layers': 'moe_layers=None', 'interleave_moe_layer_step': 'interleave_moe_layer_step=1', 'use_qk_norm': 'use_qk_norm=True', 'output_router_logits': 'output_router_logits=False', 'router_aux_loss_coef': 'router_aux_loss_coef=0.001', 'router_jitter_noise': 'router_jitter_noise=0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'no_rope_layers': 'no_rope_layers=None', 'no_rope_layer_interval': 'no_rope_layer_interval=4', 'attention_chunk_size': 'attention_chunk_size=8192', 'layer_types': 'layer_types=None', 'attn_temperature_tuning': 'attn_temperature_tuning=True', 'floor_scale': 'floor_scale=8192', 'attn_scale': 'attn_scale=0.1'
+}, model_name='Llama4TextModel', library='transformers', import_path='transformers.models.llama4'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=32000', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_select_strategy': "vision_feature_select_strategy='default'", 'vision_feature_layer': 'vision_feature_layer=-2', 'image_seq_length': 'image_seq_length=576', 'multimodal_projector_bias': 'multimodal_projector_bias=True'
+}, model_name='LlavaModel', library='transformers', import_path='transformers.models.llava'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=32000', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_select_strategy': "vision_feature_select_strategy='default'", 'vision_feature_layer': 'vision_feature_layer=-2', 'image_grid_pinpoints': 'image_grid_pinpoints=None', 'tie_word_embeddings': 'tie_word_embeddings=False', 'image_seq_length': 'image_seq_length=576', 'multimodal_projector_bias': 'multimodal_projector_bias=True'
+}, model_name='LlavaNextModel', library='transformers', import_path='transformers.models.llava_next'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=32001', 'projector_hidden_act': "projector_hidden_act='gelu'", 'multimodal_projector_bias': 'multimodal_projector_bias=True', 'vision_feature_select_strategy': "vision_feature_select_strategy='default'", 'vision_feature_layer': 'vision_feature_layer=-2', 'image_grid_pinpoints': 'image_grid_pinpoints=None', 'video_token_index': 'video_token_index=32000', 'spatial_pool_mode': "spatial_pool_mode='average'", 'spatial_pool_stride': 'spatial_pool_stride=2', 'image_seq_length': 'image_seq_length=576', 'video_seq_length': 'video_seq_length=288'
+}, model_name='LlavaNextVideoModel', library='transformers', import_path='transformers.models.llava_next_video'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=151646', 'video_token_index': 'video_token_index=151647', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_select_strategy': "vision_feature_select_strategy='full'", 'vision_feature_layer': 'vision_feature_layer=-1', 'vision_aspect_ratio': "vision_aspect_ratio='anyres_max_9'", 'image_grid_pinpoints': 'image_grid_pinpoints=None', 'multimodal_projector_bias': 'multimodal_projector_bias=True'
+}, model_name='LlavaOnevisionModel', library='transformers', import_path='transformers.models.llava_onevision'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 131072', 'hidden_size': 'hidden_size: Optional[int
+ ] = 6144', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 56', 'num_layers': 'num_layers: Optional[int
+ ] = 28', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'ffn_hidden_size': 'ffn_hidden_size: Optional[int
+ ] = 12288', 'q_lora_rank': 'q_lora_rank: Optional[int
+ ] = 1536', 'kv_lora_rank': 'kv_lora_rank: Optional[int
+ ] = 512', 'qk_nope_head_dim': 'qk_nope_head_dim: Optional[int
+ ] = 128', 'qk_rope_head_dim': 'qk_rope_head_dim: Optional[int
+ ] = 64', 'head_dim': 'head_dim: Optional[int
+ ] = 64', 'v_head_dim': 'v_head_dim: Optional[int
+ ] = 128', 'qk_head_dim': 'qk_head_dim: Optional[int
+ ] = None', 'moe_topk': 'moe_topk: Optional[int
+ ] = 12', 'n_routed_experts': 'n_routed_experts: Optional[int
+ ] = 512', 'zero_expert_num': 'zero_expert_num: Optional[int
+ ] = 256', 'expert_ffn_hidden_size': 'expert_ffn_hidden_size: Optional[int
+ ] = 2048', 'routed_scaling_factor': 'routed_scaling_factor: Optional[float
+ ] = 6.0'
+}, model_name='LongcatFlashModel', library='transformers', import_path='transformers.models.longcat_flash'), ModelAttributes(model=, model_type='model', model_parameters={'attention_window': 'attention_window: Union[list[int
+ ], int
+ ] = 512', 'sep_token_id': 'sep_token_id: int = 2', 'pad_token_id': 'pad_token_id: int = 1', 'bos_token_id': 'bos_token_id: int = 0', 'eos_token_id': 'eos_token_id: int = 2', 'vocab_size': 'vocab_size: int = 30522', 'hidden_size': 'hidden_size: int = 768', 'num_hidden_layers': 'num_hidden_layers: int = 12', 'num_attention_heads': 'num_attention_heads: int = 12', 'intermediate_size': 'intermediate_size: int = 3072', 'hidden_act': "hidden_act: str = 'gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob: float = 0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob: float = 0.1', 'max_position_embeddings': 'max_position_embeddings: int = 512', 'type_vocab_size': 'type_vocab_size: int = 2', 'initializer_range': 'initializer_range: float = 0.02', 'layer_norm_eps': 'layer_norm_eps: float = 1e-12', 'onnx_export': 'onnx_export: bool = False'
+}, model_name='LongformerModel', library='transformers', import_path='transformers.models.longformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32128', 'd_model': 'd_model=512', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=2048', 'num_layers': 'num_layers=6', 'num_decoder_layers': 'num_decoder_layers=None', 'num_heads': 'num_heads=8', 'local_radius': 'local_radius=127', 'global_block_size': 'global_block_size=16', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_factor': 'initializer_factor=1.0', 'feed_forward_proj': "feed_forward_proj='relu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'encoder_attention_type': "encoder_attention_type='local'", 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1'
+}, model_name='LongT5Model', library='transformers', import_path='transformers.models.longt5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50267', 'entity_vocab_size': 'entity_vocab_size=500000', 'hidden_size': 'hidden_size=768', 'entity_emb_size': 'entity_emb_size=256', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'use_entity_aware_attention': 'use_entity_aware_attention=True', 'classifier_dropout': 'classifier_dropout=None', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='LukeModel', library='transformers', import_path='transformers.models.luke'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'entity_vocab': 'entity_vocab: Union[str, dict, list, NoneType
+ ] = None', 'errors': "errors='replace'", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'add_prefix_space': 'add_prefix_space=False', 'task': 'task=None', 'max_entity_length': 'max_entity_length=32', 'max_mention_length': 'max_mention_length=30', 'entity_token_1': "entity_token_1=''", 'entity_token_2': "entity_token_2=''", 'entity_unk_token': "entity_unk_token='[UNK]'", 'entity_pad_token': "entity_pad_token='[PAD]'", 'entity_mask_token': "entity_mask_token='[MASK]'", 'entity_mask2_token': "entity_mask2_token='[MASK2]'"
+}, model_name='LukeTokenizer', library='transformers', import_path='transformers.models.luke'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_attention_heads': 'num_attention_heads=12', 'num_qa_labels': 'num_qa_labels=9500', 'num_object_labels': 'num_object_labels=1600', 'num_attr_labels': 'num_attr_labels=400', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'l_layers': 'l_layers=9', 'x_layers': 'x_layers=5', 'r_layers': 'r_layers=5', 'visual_feat_dim': 'visual_feat_dim=2048', 'visual_pos_dim': 'visual_pos_dim=4', 'visual_loss_normalizer': 'visual_loss_normalizer=6.67', 'task_matched': 'task_matched=True', 'task_mask_lm': 'task_mask_lm=True', 'task_obj_predict': 'task_obj_predict=True', 'task_qa': 'task_qa=True', 'visual_obj_loss': 'visual_obj_loss=True', 'visual_attr_loss': 'visual_attr_loss=True', 'visual_feat_loss': 'visual_feat_loss=True'
+}, model_name='LxmertModel', library='transformers', import_path='transformers.models.lxmert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=128112', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.05', 'decoder_layerdrop': 'decoder_layerdrop=0.05', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=2', 'scale_embedding': 'scale_embedding=True', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='M2M100Model', library='transformers', import_path='transformers.models.m2m_100'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'spm_file': 'spm_file', 'src_lang': 'src_lang=None', 'tgt_lang': 'tgt_lang=None', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'pad_token': "pad_token=''", 'unk_token': "unk_token=''", 'language_codes': "language_codes='m2m100'", 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None', 'num_madeup_words': 'num_madeup_words=8'
+}, model_name='M2M100Tokenizer', library='transformers', import_path='transformers.models.m2m_100'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50280', 'hidden_size': 'hidden_size=768', 'state_size': 'state_size=16', 'num_hidden_layers': 'num_hidden_layers=32', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=0', 'expand': 'expand=2', 'conv_kernel': 'conv_kernel=4', 'use_bias': 'use_bias=False', 'use_conv_bias': 'use_conv_bias=True', 'hidden_act': "hidden_act='silu'", 'initializer_range': 'initializer_range=0.1', 'residual_in_fp32': 'residual_in_fp32=True', 'time_step_rank': "time_step_rank='auto'", 'time_step_scale': 'time_step_scale=1.0', 'time_step_min': 'time_step_min=0.001', 'time_step_max': 'time_step_max=0.1', 'time_step_init_scheme': "time_step_init_scheme='random'", 'time_step_floor': 'time_step_floor=0.0001', 'rescale_prenorm_residual': 'rescale_prenorm_residual=False', 'use_mambapy': 'use_mambapy=False'
+}, model_name='MambaModel', library='transformers', import_path='transformers.models.mamba'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'num_heads': 'num_heads=128', 'head_dim': 'head_dim=64', 'vocab_size': 'vocab_size=32768', 'hidden_size': 'hidden_size=4096', 'state_size': 'state_size=128', 'num_hidden_layers': 'num_hidden_layers=64', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'expand': 'expand=2', 'conv_kernel': 'conv_kernel=4', 'n_groups': 'n_groups=8', 'use_bias': 'use_bias=False', 'use_conv_bias': 'use_conv_bias=True', 'hidden_act': "hidden_act='silu'", 'initializer_range': 'initializer_range=0.1', 'residual_in_fp32': 'residual_in_fp32=True', 'time_step_rank': "time_step_rank='auto'", 'time_step_min': 'time_step_min=0.001', 'time_step_max': 'time_step_max=0.1', 'time_step_floor': 'time_step_floor=0.0001', 'time_step_limit': 'time_step_limit=(0.0, inf)', 'rescale_prenorm_residual': 'rescale_prenorm_residual=False', 'rms_norm': 'rms_norm=True', 'chunk_size': 'chunk_size=256', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='Mamba2Model', library='transformers', import_path='transformers.models.mamba2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=58101', 'decoder_vocab_size': 'decoder_vocab_size=None', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=58100', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=58100', 'eos_token_id': 'eos_token_id=0', 'forced_eos_token_id': 'forced_eos_token_id=0', 'share_encoder_decoder_embeddings': 'share_encoder_decoder_embeddings=True'
+}, model_name='MarianModel', library='transformers', import_path='transformers.models.marian'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'source_spm': 'source_spm', 'target_spm': 'target_spm', 'vocab': 'vocab', 'target_vocab_file': 'target_vocab_file=None', 'source_lang': 'source_lang=None', 'target_lang': 'target_lang=None', 'unk_token': "unk_token=''", 'eos_token': "eos_token=''", 'pad_token': "pad_token=''", 'model_max_length': 'model_max_length=512', 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None', 'separate_vocabs': 'separate_vocabs=False'
+}, model_name='MarianTokenizer', library='transformers', import_path='transformers.models.marian'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'max_xpath_tag_unit_embeddings': 'max_xpath_tag_unit_embeddings=256', 'max_xpath_subs_unit_embeddings': 'max_xpath_subs_unit_embeddings=1024', 'tag_pad_id': 'tag_pad_id=216', 'subs_pad_id': 'subs_pad_id=1001', 'xpath_unit_hidden_size': 'xpath_unit_hidden_size=32', 'max_depth': 'max_depth=50', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='MarkupLMModel', library='transformers', import_path='transformers.models.markuplm'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'feature_size': 'feature_size: int = 256', 'mask_feature_size': 'mask_feature_size: int = 256', 'hidden_dim': 'hidden_dim: int = 256', 'encoder_feedforward_dim': 'encoder_feedforward_dim: int = 1024', 'activation_function': "activation_function: str = 'relu'", 'encoder_layers': 'encoder_layers: int = 6', 'decoder_layers': 'decoder_layers: int = 10', 'num_attention_heads': 'num_attention_heads: int = 8', 'dropout': 'dropout: float = 0.0', 'dim_feedforward': 'dim_feedforward: int = 2048', 'pre_norm': 'pre_norm: bool = False', 'enforce_input_projection': 'enforce_input_projection: bool = False', 'common_stride': 'common_stride: int = 4', 'ignore_value': 'ignore_value: int = 255', 'num_queries': 'num_queries: int = 100', 'no_object_weight': 'no_object_weight: float = 0.1', 'class_weight': 'class_weight: float = 2.0', 'mask_weight': 'mask_weight: float = 5.0', 'dice_weight': 'dice_weight: float = 5.0', 'train_num_points': 'train_num_points: int = 12544', 'oversample_ratio': 'oversample_ratio: float = 3.0', 'importance_sample_ratio': 'importance_sample_ratio: float = 0.75', 'init_std': 'init_std: float = 0.02', 'init_xavier_std': 'init_xavier_std: float = 1.0', 'use_auxiliary_loss': 'use_auxiliary_loss: bool = True', 'feature_strides': 'feature_strides: list[int
+ ] = [
+ 4,
+ 8,
+ 16,
+ 32
+ ]', 'output_auxiliary_logits': 'output_auxiliary_logits: Optional[bool
+ ] = None', 'backbone': 'backbone: Optional[str
+ ] = None', 'use_pretrained_backbone': 'use_pretrained_backbone: bool = False', 'use_timm_backbone': 'use_timm_backbone: bool = False', 'backbone_kwargs': 'backbone_kwargs: Optional[dict
+ ] = None'
+}, model_name='Mask2FormerModel', library='transformers', import_path='transformers.models.mask2former'), ModelAttributes(model=, model_type='model', model_parameters={'fpn_feature_size': 'fpn_feature_size: int = 256', 'mask_feature_size': 'mask_feature_size: int = 256', 'no_object_weight': 'no_object_weight: float = 0.1', 'use_auxiliary_loss': 'use_auxiliary_loss: bool = False', 'backbone_config': 'backbone_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'decoder_config': 'decoder_config: Optional[dict
+ ] = None', 'init_std': 'init_std: float = 0.02', 'init_xavier_std': 'init_xavier_std: float = 1.0', 'dice_weight': 'dice_weight: float = 1.0', 'cross_entropy_weight': 'cross_entropy_weight: float = 1.0', 'mask_weight': 'mask_weight: float = 20.0', 'output_auxiliary_logits': 'output_auxiliary_logits: Optional[bool
+ ] = None', 'backbone': 'backbone: Optional[str
+ ] = None', 'use_pretrained_backbone': 'use_pretrained_backbone: bool = False', 'use_timm_backbone': 'use_timm_backbone: bool = False', 'backbone_kwargs': 'backbone_kwargs: Optional[dict
+ ] = None'
+}, model_name='MaskFormerModel', library='transformers', import_path='transformers.models.maskformer'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=96', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'num_heads': 'num_heads=[
+ 3,
+ 6,
+ 12,
+ 24
+ ]', 'window_size': 'window_size=7', 'mlp_ratio': 'mlp_ratio=4.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'use_absolute_embeddings': 'use_absolute_embeddings=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='MaskFormerSwinModel', library='transformers', import_path='transformers.models.maskformer'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'classifier_dropout': 'classifier_dropout=0.0', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'forced_eos_token_id': 'forced_eos_token_id=2'
+}, model_name='MBartModel', library='transformers', import_path='transformers.models.mbart'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'src_lang': 'src_lang=None', 'tgt_lang': 'tgt_lang=None', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='MBartTokenizer', library='transformers', import_path='transformers.models.mbart'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=29056', 'hidden_size': 'hidden_size=1024', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=16', 'intermediate_size': 'intermediate_size=4096', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0'
+}, model_name='MegatronBertModel', library='transformers', import_path='transformers.models.megatron_bert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='MetaClip2Model', library='transformers', import_path='transformers.models.metaclip_2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space: bool = True', 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='XLMRobertaTokenizer', library='transformers', import_path='transformers.models.xlm_roberta'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=[
+ 32,
+ 128
+ ]', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'max_token_length': 'max_token_length=27', 'num_character_labels': 'num_character_labels=38', 'num_bpe_labels': 'num_bpe_labels=50257', 'num_wordpiece_labels': 'num_wordpiece_labels=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'mlp_ratio': 'mlp_ratio=4.0', 'qkv_bias': 'qkv_bias=True', 'distilled': 'distilled=False', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'drop_rate': 'drop_rate=0.0', 'attn_drop_rate': 'attn_drop_rate=0.0', 'drop_path_rate': 'drop_path_rate=0.0', 'output_a3_attentions': 'output_a3_attentions=False', 'initializer_range': 'initializer_range=0.02'
+}, model_name='MgpstrForSceneTextRecognition', library='transformers', import_path='transformers.models.mgp_str'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'unk_token': "unk_token='[GO]'", 'bos_token': "bos_token='[GO]'", 'eos_token': "eos_token='[s]'", 'pad_token': "pad_token='[GO]'"
+}, model_name='MgpstrTokenizer', library='transformers', import_path='transformers.models.mgp_str'), ModelAttributes(model=, model_type='model', model_parameters={'sampling_rate': 'sampling_rate: Optional[int
+ ] = 24000', 'frame_rate': 'frame_rate: Optional[int
+ ] = None', 'audio_channels': 'audio_channels: Optional[int
+ ] = 1', 'hidden_size': 'hidden_size: Optional[int
+ ] = 512', 'num_filters': 'num_filters: Optional[int
+ ] = 64', 'num_residual_layers': 'num_residual_layers: Optional[int
+ ] = 1', 'upsampling_ratios': 'upsampling_ratios: Optional[list[int
+ ]
+ ] = None', 'kernel_size': 'kernel_size: Optional[int
+ ] = 7', 'last_kernel_size': 'last_kernel_size: Optional[int
+ ] = 3', 'residual_kernel_size': 'residual_kernel_size: Optional[int
+ ] = 3', 'dilation_growth_rate': 'dilation_growth_rate: Optional[int
+ ] = 2', 'use_causal_conv': 'use_causal_conv: Optional[bool
+ ] = True', 'pad_mode': "pad_mode: Optional[str] = 'constant'", 'compress': 'compress: Optional[int
+ ] = 2', 'trim_right_ratio': 'trim_right_ratio: Optional[float
+ ] = 1.0', 'codebook_size': 'codebook_size: Optional[int
+ ] = 2048', 'codebook_dim': 'codebook_dim: Optional[int
+ ] = 256', 'num_quantizers': 'num_quantizers: Optional[int
+ ] = 32', 'use_conv_shortcut': 'use_conv_shortcut: Optional[bool
+ ] = False', 'vector_quantization_hidden_dimension': 'vector_quantization_hidden_dimension: Optional[int
+ ] = 256', 'num_semantic_quantizers': 'num_semantic_quantizers: Optional[int
+ ] = 1', 'upsample_groups': 'upsample_groups: Optional[int
+ ] = 512', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 8', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 2048', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 8', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'gelu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'norm_eps': 'norm_eps: Optional[int
+ ] = 1e-05', 'use_streaming': 'use_streaming: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = 250', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'layer_scale_initial_scale': 'layer_scale_initial_scale: Optional[float
+ ] = 0.01', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False'
+}, model_name='MimiModel', library='transformers', import_path='transformers.models.mimi'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'router_jitter_noise': 'router_jitter_noise: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'block_size': 'block_size: Optional[int
+ ] = 256', 'full_attn_alpha_factor': 'full_attn_alpha_factor: Optional[int
+ ] = 1', 'full_attn_beta_factor': 'full_attn_beta_factor: Optional[int
+ ] = 1', 'linear_attn_alpha_factor': 'linear_attn_alpha_factor: Optional[int
+ ] = 1', 'linear_attn_beta_factor': 'linear_attn_beta_factor: Optional[int
+ ] = 1', 'mlp_alpha_factor': 'mlp_alpha_factor: Optional[int
+ ] = 1', 'mlp_beta_factor': 'mlp_beta_factor: Optional[int
+ ] = 1'
+}, model_name='MiniMaxModel', library='transformers', import_path='transformers.models.minimax'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Optional[transformers.modeling_rope_utils.RopeParameters
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='MinistralModel', library='transformers', import_path='transformers.models.ministral'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 131072', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 34', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 262144', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 11', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='Ministral3Model', library='transformers', import_path='transformers.models.ministral3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='MistralModel', library='transformers', import_path='transformers.models.mistral'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=10', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_layer': 'vision_feature_layer=-1', 'multimodal_projector_bias': 'multimodal_projector_bias=False', 'spatial_merge_size': 'spatial_merge_size=2'
+}, model_name='Mistral3Model', library='transformers', import_path='transformers.models.mistral3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 14336', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 8', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'router_jitter_noise': 'router_jitter_noise: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='MixtralModel', library='transformers', import_path='transformers.models.mixtral'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1664', 'intermediate_size': 'intermediate_size=8192', 'num_hidden_layers': 'num_hidden_layers=48', 'num_attention_heads': 'num_attention_heads=16', 'num_key_value_groups': 'num_key_value_groups=1', 'num_channels': 'num_channels=3', 'image_size': 'image_size=336', 'patch_size': 'patch_size=14', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-05', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02', 'initializer_factor': 'initializer_factor=1.0'
+}, model_name='MLCDVisionModel', library='transformers', import_path='transformers.models.mlcd'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=128256'
+}, model_name='MllamaModel', library='transformers', import_path='transformers.models.mllama'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'backbone_kwargs': 'backbone_kwargs=None', 'text_config': 'text_config=None', 'num_queries': 'num_queries=900', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'num_feature_levels': 'num_feature_levels=4', 'encoder_n_points': 'encoder_n_points=4', 'decoder_n_points': 'decoder_n_points=4', 'two_stage': 'two_stage=True', 'class_cost': 'class_cost=1.0', 'bbox_cost': 'bbox_cost=5.0', 'giou_cost': 'giou_cost=2.0', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5.0', 'giou_loss_coefficient': 'giou_loss_coefficient=2.0', 'focal_alpha': 'focal_alpha=0.25', 'disable_custom_kernels': 'disable_custom_kernels=False', 'max_text_len': 'max_text_len=256', 'text_enhancer_dropout': 'text_enhancer_dropout=0.0', 'fusion_droppath': 'fusion_droppath=0.1', 'fusion_dropout': 'fusion_dropout=0.0', 'embedding_init_target': 'embedding_init_target=True', 'query_dim': 'query_dim=4', 'positional_embedding_temperature': 'positional_embedding_temperature=20', 'init_std': 'init_std=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05'
+}, model_name='MMGroundingDinoModel', library='transformers', import_path='transformers.models.mm_grounding_dino'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=512', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=4', 'intermediate_size': 'intermediate_size=512', 'hidden_act': "hidden_act='relu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'embedding_size': 'embedding_size=128', 'trigram_input': 'trigram_input=True', 'use_bottleneck': 'use_bottleneck=True', 'intra_bottleneck_size': 'intra_bottleneck_size=128', 'use_bottleneck_attention': 'use_bottleneck_attention=False', 'key_query_shared_bottleneck': 'key_query_shared_bottleneck=True', 'num_feedforward_networks': 'num_feedforward_networks=4', 'normalization_type': "normalization_type='no_norm'", 'classifier_activation': 'classifier_activation=True', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='MobileBertModel', library='transformers', import_path='transformers.models.mobilebert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'depth_multiplier': 'depth_multiplier=1.0', 'min_depth': 'min_depth=8', 'hidden_act': "hidden_act='relu6'", 'tf_padding': 'tf_padding=True', 'classifier_dropout_prob': 'classifier_dropout_prob=0.999', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=0.001'
+}, model_name='MobileNetV1Model', library='transformers', import_path='transformers.models.mobilenet_v1'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'depth_multiplier': 'depth_multiplier=1.0', 'depth_divisible_by': 'depth_divisible_by=8', 'min_depth': 'min_depth=8', 'expand_ratio': 'expand_ratio=6.0', 'output_stride': 'output_stride=32', 'first_layer_is_expansion': 'first_layer_is_expansion=True', 'finegrained_output': 'finegrained_output=True', 'hidden_act': "hidden_act='relu6'", 'tf_padding': 'tf_padding=True', 'classifier_dropout_prob': 'classifier_dropout_prob=0.8', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=0.001', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255'
+}, model_name='MobileNetV2Model', library='transformers', import_path='transformers.models.mobilenet_v2'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'image_size': 'image_size=256', 'patch_size': 'patch_size=2', 'hidden_sizes': 'hidden_sizes=[
+ 144,
+ 192,
+ 240
+ ]', 'neck_hidden_sizes': 'neck_hidden_sizes=[
+ 16,
+ 32,
+ 64,
+ 96,
+ 128,
+ 160,
+ 640
+ ]', 'num_attention_heads': 'num_attention_heads=4', 'mlp_ratio': 'mlp_ratio=2.0', 'expand_ratio': 'expand_ratio=4.0', 'hidden_act': "hidden_act='silu'", 'conv_kernel_size': 'conv_kernel_size=3', 'output_stride': 'output_stride=32', 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'classifier_dropout_prob': 'classifier_dropout_prob=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'qkv_bias': 'qkv_bias=True', 'aspp_out_channels': 'aspp_out_channels=256', 'atrous_rates': 'atrous_rates=[
+ 6,
+ 12,
+ 18
+ ]', 'aspp_dropout_prob': 'aspp_dropout_prob=0.1', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255'
+}, model_name='MobileViTModel', library='transformers', import_path='transformers.models.mobilevit'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'image_size': 'image_size=256', 'patch_size': 'patch_size=2', 'expand_ratio': 'expand_ratio=2.0', 'hidden_act': "hidden_act='swish'", 'conv_kernel_size': 'conv_kernel_size=3', 'output_stride': 'output_stride=32', 'classifier_dropout_prob': 'classifier_dropout_prob=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'aspp_out_channels': 'aspp_out_channels=512', 'atrous_rates': 'atrous_rates=[
+ 6,
+ 12,
+ 18
+ ]', 'aspp_dropout_prob': 'aspp_dropout_prob=0.1', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255', 'n_attn_blocks': 'n_attn_blocks=[
+ 2,
+ 4,
+ 3
+ ]', 'base_attn_unit_dims': 'base_attn_unit_dims=[
+ 128,
+ 192,
+ 256
+ ]', 'width_multiplier': 'width_multiplier=1.0', 'ffn_multiplier': 'ffn_multiplier=2', 'attn_dropout': 'attn_dropout=0.0', 'ffn_dropout': 'ffn_dropout=0.0'
+}, model_name='MobileViTV2Model', library='transformers', import_path='transformers.models.mobilevitv2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50368', 'hidden_size': 'hidden_size: Optional[int
+ ] = 768', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 1152', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 22', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 12', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'initializer_cutoff_factor': 'initializer_cutoff_factor: Optional[float
+ ] = 2.0', 'norm_eps': 'norm_eps: Optional[int
+ ] = 1e-05', 'norm_bias': 'norm_bias: Optional[bool
+ ] = False', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 50283', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50282', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 50281', 'cls_token_id': 'cls_token_id: Optional[int
+ ] = 50281', 'sep_token_id': 'sep_token_id: Optional[int
+ ] = 50282', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'local_attention': 'local_attention: Optional[int
+ ] = 128', 'embedding_dropout': 'embedding_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'mlp_dropout': 'mlp_dropout: Optional[float
+ ] = 0.0', 'decoder_bias': 'decoder_bias: Optional[bool
+ ] = True', 'classifier_pooling': "classifier_pooling: Literal['cls', 'mean'] = 'cls'", 'classifier_dropout': 'classifier_dropout: Optional[float
+ ] = 0.0', 'classifier_bias': 'classifier_bias: Optional[bool
+ ] = False', 'classifier_activation': "classifier_activation: Optional[str] = 'gelu'", 'deterministic_flash_attn': 'deterministic_flash_attn: Optional[bool
+ ] = False', 'sparse_prediction': 'sparse_prediction: Optional[bool
+ ] = False', 'sparse_pred_ignore_index': 'sparse_pred_ignore_index: Optional[int
+ ] = -100', 'reference_compile': 'reference_compile: Optional[bool
+ ] = None', 'repad_logits_with_grad': 'repad_logits_with_grad: Optional[bool
+ ] = False'
+}, model_name='ModernBertModel', library='transformers', import_path='transformers.models.modernbert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50368', 'hidden_size': 'hidden_size: Optional[int
+ ] = 768', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 1152', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 22', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 12', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'initializer_cutoff_factor': 'initializer_cutoff_factor: Optional[float
+ ] = 2.0', 'norm_eps': 'norm_eps: Optional[int
+ ] = 1e-05', 'norm_bias': 'norm_bias: Optional[bool
+ ] = False', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 50283', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50282', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 50281', 'cls_token_id': 'cls_token_id: Optional[int
+ ] = 50281', 'sep_token_id': 'sep_token_id: Optional[int
+ ] = 50282', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'embedding_dropout': 'embedding_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'mlp_dropout': 'mlp_dropout: Optional[float
+ ] = 0.0', 'decoder_bias': 'decoder_bias: Optional[bool
+ ] = True', 'classifier_dropout': 'classifier_dropout: Optional[float
+ ] = 0.0', 'classifier_bias': 'classifier_bias: Optional[bool
+ ] = False', 'classifier_activation': "classifier_activation: Optional[str] = 'gelu'", 'local_attention': 'local_attention: Optional[int
+ ] = 128', 'global_attn_every_n_layers': 'global_attn_every_n_layers: Optional[int
+ ] = 3', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None'
+}, model_name='ModernBertDecoderModel', library='transformers', import_path='transformers.models.modernbert_decoder'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32768', 'hidden_size': 'hidden_size: Optional[int
+ ] = 288', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 1152', 'encoder_num_hidden_layers': 'encoder_num_hidden_layers: Optional[int
+ ] = 6', 'decoder_num_hidden_layers': 'decoder_num_hidden_layers: Optional[int
+ ] = 6', 'encoder_num_attention_heads': 'encoder_num_attention_heads: Optional[int
+ ] = 8', 'decoder_num_attention_heads': 'decoder_num_attention_heads: Optional[int
+ ] = 8', 'encoder_num_key_value_heads': 'encoder_num_key_value_heads: Optional[int
+ ] = None', 'decoder_num_key_value_heads': 'decoder_num_key_value_heads: Optional[int
+ ] = None', 'pad_head_dim_to_multiple_of': 'pad_head_dim_to_multiple_of: Optional[int
+ ] = None', 'encoder_hidden_act': "encoder_hidden_act: Optional[str] = 'gelu'", 'decoder_hidden_act': "decoder_hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 512', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'decoder_start_token_id': 'decoder_start_token_id: Optional[int
+ ] = 1', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'is_encoder_decoder': 'is_encoder_decoder: Optional[bool
+ ] = True', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2'
+}, model_name='MoonshineModel', library='transformers', import_path='transformers.models.moonshine'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'audio_vocab_size': 'audio_vocab_size: Optional[int
+ ] = None', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 3000', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'head_dim': 'head_dim: Optional[int
+ ] = None', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'sliding_window': 'sliding_window: Optional[int
+ ] = 3000', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'ffn_dim': 'ffn_dim: Optional[int
+ ] = 22528', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-08', 'num_codebooks': 'num_codebooks: Optional[int
+ ] = 8', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False'
+}, model_name='MoshiModel', library='transformers', import_path='transformers.models.moshi'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30527', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='MPNetModel', library='transformers', import_path='transformers.models.mpnet'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case=True', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token='[UNK]'", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'tokenize_chinese_chars': 'tokenize_chinese_chars=True', 'strip_accents': 'strip_accents=None'
+}, model_name='MPNetTokenizer', library='transformers', import_path='transformers.models.mpnet'), ModelAttributes(model=, model_type='model', model_parameters={'d_model': 'd_model: int = 2048', 'n_heads': 'n_heads: int = 16', 'n_layers': 'n_layers: int = 24', 'expansion_ratio': 'expansion_ratio: int = 4', 'max_seq_len': 'max_seq_len: int = 2048', 'vocab_size': 'vocab_size: int = 50368', 'resid_pdrop': 'resid_pdrop: float = 0.0', 'layer_norm_epsilon': 'layer_norm_epsilon: float = 1e-05', 'emb_pdrop': 'emb_pdrop: float = 0.0', 'learned_pos_emb': 'learned_pos_emb: bool = True', 'attn_config': 'attn_config: transformers.models.mpt.configuration_mpt.MptAttentionConfig = None', 'init_device': "init_device: str = 'cpu'", 'logit_scale': 'logit_scale: Union[float, str, NoneType
+ ] = None', 'no_bias': 'no_bias: bool = True', 'verbose': 'verbose: int = 0', 'embedding_fraction': 'embedding_fraction: float = 1.0', 'norm_type': "norm_type: str = 'low_precision_layernorm'", 'initializer_range': 'initializer_range=0.02'
+}, model_name='MptModel', library='transformers', import_path='transformers.models.mpt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'block_per_row': 'block_per_row=4', 'approx_mode': "approx_mode='full'", 'initial_prior_first_n_blocks': 'initial_prior_first_n_blocks=0', 'initial_prior_diagonal_n_blocks': 'initial_prior_diagonal_n_blocks=0', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='MraModel', library='transformers', import_path='transformers.models.mra'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=250112', 'd_model': 'd_model=512', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=1024', 'num_layers': 'num_layers=8', 'num_decoder_layers': 'num_decoder_layers=None', 'num_heads': 'num_heads=6', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_factor': 'initializer_factor=1.0', 'feed_forward_proj': "feed_forward_proj='gated-gelu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'tokenizer_class': "tokenizer_class='T5Tokenizer'", 'tie_word_embeddings': 'tie_word_embeddings=False', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'decoder_start_token_id': 'decoder_start_token_id=0', 'classifier_dropout': 'classifier_dropout=0.0'
+}, model_name='MT5Model', library='transformers', import_path='transformers.models.mt5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'text_encoder': 'text_encoder', 'audio_encoder': 'audio_encoder', 'decoder': 'decoder'
+}, model_name='MusicgenModel', library='transformers', import_path='transformers.models.musicgen'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'text_encoder': 'text_encoder', 'audio_encoder': 'audio_encoder', 'decoder': 'decoder', 'num_chroma': 'num_chroma=12', 'chroma_length': 'chroma_length=235'
+}, model_name='MusicgenMelodyModel', library='transformers', import_path='transformers.models.musicgen_melody'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50267', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'classifier_dropout': 'classifier_dropout=0.0', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'is_encoder_decoder': 'is_encoder_decoder=True', 'decoder_start_token_id': 'decoder_start_token_id=2', 'use_prompt': 'use_prompt=False', 'prompt_length': 'prompt_length=100', 'prompt_mid_dim': 'prompt_mid_dim=800'
+}, model_name='MvpModel', library='transformers', import_path='transformers.models.mvp'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 50304', 'hidden_size': 'hidden_size: int = 768', 'intermediate_size': 'intermediate_size: int | None = 8192', 'num_hidden_layers': 'num_hidden_layers: int = 12', 'num_attention_heads': 'num_attention_heads: int = 6', 'num_key_value_heads': 'num_key_value_heads: int | None = None', 'max_position_embeddings': 'max_position_embeddings: int = 2048', 'hidden_act': "hidden_act: str = 'relu2'", 'attention_dropout': 'attention_dropout: float = 0.0', 'rms_norm_eps': 'rms_norm_eps: float = 1e-06', 'initializer_range': 'initializer_range: float = 0.02', 'rope_parameters': 'rope_parameters: transformers.modeling_rope_utils.RopeParameters | dict | None = None', 'final_logit_softcapping': 'final_logit_softcapping: float | None = 15.0', 'attention_bias': 'attention_bias: bool = False', 'bos_token_id': 'bos_token_id: int = 0', 'eos_token_id': 'eos_token_id: int = 1', 'pad_token_id': 'pad_token_id: int = 1', 'tie_word_embeddings': 'tie_word_embeddings: bool = False'
+}, model_name='NanoChatModel', library='transformers', import_path='transformers.models.nanochat'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 6144', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 24576', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 48', 'head_dim': 'head_dim: Optional[int
+ ] = None', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'relu2'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.0134', 'norm_eps': 'norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 3', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False'
+}, model_name='NemotronModel', library='transformers', import_path='transformers.models.nemotron'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=128112', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.05', 'decoder_layerdrop': 'decoder_layerdrop=0.05', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=2', 'scale_embedding': 'scale_embedding=True', 'router_bias': 'router_bias=False', 'router_dtype': "router_dtype='float32'", 'router_ignore_padding_tokens': 'router_ignore_padding_tokens=False', 'num_experts': 'num_experts=128', 'expert_capacity': 'expert_capacity=64', 'encoder_sparse_step': 'encoder_sparse_step=4', 'decoder_sparse_step': 'decoder_sparse_step=4', 'router_z_loss_coef': 'router_z_loss_coef=0.001', 'router_aux_loss_coef': 'router_aux_loss_coef=0.001', 'second_expert_policy': "second_expert_policy='all'", 'normalize_router_prob_before_dropping': 'normalize_router_prob_before_dropping=False', 'batch_prioritized_routing': 'batch_prioritized_routing=False', 'moe_eval_capacity_token_fraction': 'moe_eval_capacity_token_fraction=1.0', 'moe_token_dropout': 'moe_token_dropout=0.2', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'output_router_logits': 'output_router_logits=False'
+}, model_name='NllbMoeModel', library='transformers', import_path='transformers.models.nllb_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'src_lang': 'src_lang=None', 'tgt_lang': 'tgt_lang=None', 'additional_special_tokens': 'additional_special_tokens=None', 'legacy_behaviour': 'legacy_behaviour=False'
+}, model_name='NllbTokenizer', library='transformers', import_path='transformers.models.nllb'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30000', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu_new'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=510', 'type_vocab_size': 'type_vocab_size=2', 'segment_means_seq_len': 'segment_means_seq_len=64', 'num_landmarks': 'num_landmarks=64', 'conv_kernel_size': 'conv_kernel_size=65', 'inv_coeff_init_option': 'inv_coeff_init_option=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='NystromformerModel', library='transformers', import_path='transformers.models.nystromformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = True', 'keep_accents': 'keep_accents: bool = False', 'bos_token': "bos_token: str = '[CLS]'", 'eos_token': "eos_token: str = '[SEP]'", 'unk_token': "unk_token: str = ''", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'add_prefix_space': 'add_prefix_space: bool = True', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='AlbertTokenizer', library='transformers', import_path='transformers.models.albert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50304', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50279', 'tie_word_embeddings': 'tie_word_embeddings: Optional[int
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'clip_qkv': 'clip_qkv: Optional[bool
+ ] = None'
+}, model_name='OlmoModel', library='transformers', import_path='transformers.models.olmo'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50304', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50279', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05'
+}, model_name='Olmo2Model', library='transformers', import_path='transformers.models.olmo2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50304', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50279', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Olmo3Model', library='transformers', import_path='transformers.models.olmo3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50304', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 2048', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 16', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = None', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50279', 'tie_word_embeddings': 'tie_word_embeddings: Optional[int
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'clip_qkv': 'clip_qkv: Optional[bool
+ ] = None', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 8', 'num_experts': 'num_experts: Optional[int
+ ] = 64', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.01', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False'
+}, model_name='OlmoeModel', library='transformers', import_path='transformers.models.olmoe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'backbone_config': 'backbone_config=None', 'use_timm_backbone': 'use_timm_backbone=True', 'backbone': "backbone='swin_tiny_patch4_window7_224'", 'backbone_kwargs': 'backbone_kwargs=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'apply_layernorm_after_vision_backbone': 'apply_layernorm_after_vision_backbone=True', 'image_size': 'image_size=640', 'disable_custom_kernels': 'disable_custom_kernels=False', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'batch_norm_eps': 'batch_norm_eps=1e-05', 'init_std': 'init_std=0.02', 'text_projection_in_dim': 'text_projection_in_dim=512', 'text_projection_out_dim': 'text_projection_out_dim=512', 'task_encoder_hidden_dim': 'task_encoder_hidden_dim=1024', 'class_embed_dim': 'class_embed_dim=512', 'class_distance_type': "class_distance_type='cosine'", 'num_queries': 'num_queries=900', 'csp_activation': "csp_activation='silu'", 'conv_norm_activation': "conv_norm_activation='gelu'", 'encoder_feedforward_activation': "encoder_feedforward_activation='relu'", 'encoder_feedforward_dropout': 'encoder_feedforward_dropout=0.0', 'encoder_dropout': 'encoder_dropout=0.0', 'hidden_expansion': 'hidden_expansion=1', 'vision_features_channels': 'vision_features_channels=[
+ 256,
+ 256,
+ 256
+ ]', 'encoder_hidden_dim': 'encoder_hidden_dim=256', 'encoder_in_channels': 'encoder_in_channels=[
+ 192,
+ 384,
+ 768
+ ]', 'encoder_projection_indices': 'encoder_projection_indices=[
+ 2
+ ]', 'encoder_attention_heads': 'encoder_attention_heads=8', 'encoder_dim_feedforward': 'encoder_dim_feedforward=2048', 'encoder_layers': 'encoder_layers=1', 'positional_encoding_temperature': 'positional_encoding_temperature=10000', 'num_feature_levels': 'num_feature_levels=3', 'decoder_hidden_dim': 'decoder_hidden_dim=256', 'decoder_num_heads': 'decoder_num_heads=8', 'decoder_num_layers': 'decoder_num_layers=6', 'decoder_activation': "decoder_activation='relu'", 'decoder_dim_feedforward': 'decoder_dim_feedforward=2048', 'decoder_num_points': 'decoder_num_points=4', 'decoder_dropout': 'decoder_dropout=0.0', 'eval_size': 'eval_size=None', 'learn_initial_query': 'learn_initial_query=False', 'cache_size': 'cache_size=100', 'is_encoder_decoder': 'is_encoder_decoder=True'
+}, model_name='OmDetTurboForObjectDetection', library='transformers', import_path='transformers.models.omdet_turbo'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'backbone': 'backbone: Optional[str
+ ] = None', 'use_pretrained_backbone': 'use_pretrained_backbone: bool = False', 'use_timm_backbone': 'use_timm_backbone: bool = False', 'backbone_kwargs': 'backbone_kwargs: Optional[dict
+ ] = None', 'ignore_value': 'ignore_value: int = 255', 'num_queries': 'num_queries: int = 150', 'no_object_weight': 'no_object_weight: int = 0.1', 'class_weight': 'class_weight: float = 2.0', 'mask_weight': 'mask_weight: float = 5.0', 'dice_weight': 'dice_weight: float = 5.0', 'contrastive_weight': 'contrastive_weight: float = 0.5', 'contrastive_temperature': 'contrastive_temperature: float = 0.07', 'train_num_points': 'train_num_points: int = 12544', 'oversample_ratio': 'oversample_ratio: float = 3.0', 'importance_sample_ratio': 'importance_sample_ratio: float = 0.75', 'init_std': 'init_std: float = 0.02', 'init_xavier_std': 'init_xavier_std: float = 1.0', 'layer_norm_eps': 'layer_norm_eps: float = 1e-05', 'is_training': 'is_training: bool = False', 'use_auxiliary_loss': 'use_auxiliary_loss: bool = True', 'output_auxiliary_logits': 'output_auxiliary_logits: bool = True', 'strides': 'strides: Optional[list
+ ] = [
+ 4,
+ 8,
+ 16,
+ 32
+ ]', 'task_seq_len': 'task_seq_len: int = 77', 'text_encoder_width': 'text_encoder_width: int = 256', 'text_encoder_context_length': 'text_encoder_context_length: int = 77', 'text_encoder_num_layers': 'text_encoder_num_layers: int = 6', 'text_encoder_vocab_size': 'text_encoder_vocab_size: int = 49408', 'text_encoder_proj_layers': 'text_encoder_proj_layers: int = 2', 'text_encoder_n_ctx': 'text_encoder_n_ctx: int = 16', 'conv_dim': 'conv_dim: int = 256', 'mask_dim': 'mask_dim: int = 256', 'hidden_dim': 'hidden_dim: int = 256', 'encoder_feedforward_dim': 'encoder_feedforward_dim: int = 1024', 'norm': "norm: str = 'GN'", 'encoder_layers': 'encoder_layers: int = 6', 'decoder_layers': 'decoder_layers: int = 10', 'use_task_norm': 'use_task_norm: bool = True', 'num_attention_heads': 'num_attention_heads: int = 8', 'dropout': 'dropout: float = 0.1', 'dim_feedforward': 'dim_feedforward: int = 2048', 'pre_norm': 'pre_norm: bool = False', 'enforce_input_proj': 'enforce_input_proj: bool = False', 'query_dec_layers': 'query_dec_layers: int = 2', 'common_stride': 'common_stride: int = 4'
+}, model_name='OneFormerModel', library='transformers', import_path='transformers.models.oneformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=40478', 'n_positions': 'n_positions=512', 'n_embd': 'n_embd=768', 'n_layer': 'n_layer=12', 'n_head': 'n_head=12', 'afn': "afn='gelu'", 'resid_pdrop': 'resid_pdrop=0.1', 'embd_pdrop': 'embd_pdrop=0.1', 'attn_pdrop': 'attn_pdrop=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'initializer_range': 'initializer_range=0.02', 'summary_type': "summary_type='cls_index'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': 'summary_activation=None', 'summary_proj_to_labels': 'summary_proj_to_labels=True', 'summary_first_dropout': 'summary_first_dropout=0.1'
+}, model_name='OpenAIGPTModel', library='transformers', import_path='transformers.models.openai'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''"
+}, model_name='OpenAIGPTTokenizer', library='transformers', import_path='transformers.models.openai'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50272', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'ffn_dim': 'ffn_dim=3072', 'max_position_embeddings': 'max_position_embeddings=2048', 'do_layer_norm_before': 'do_layer_norm_before=True', '_remove_final_layer_norm': '_remove_final_layer_norm=False', 'word_embed_proj_dim': 'word_embed_proj_dim=None', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'num_attention_heads': 'num_attention_heads=12', 'activation_function': "activation_function='relu'", 'layerdrop': 'layerdrop=0.0', 'init_std': 'init_std=0.02', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=2', 'eos_token_id': 'eos_token_id=2', 'enable_bias': 'enable_bias=True', 'layer_norm_elementwise_affine': 'layer_norm_elementwise_affine=True'
+}, model_name='OPTModel', library='transformers', import_path='transformers.models.opt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_id': 'image_token_id=151665', 'visual_indicator_token_ids': 'visual_indicator_token_ids=[
+ 151666,
+ 151667,
+ 151668,
+ 151669,
+ 151670
+ ]', 'vocab_size': 'vocab_size=151643', 'hidden_size': 'hidden_size=1536'
+}, model_name='Ovis2Model', library='transformers', import_path='transformers.models.ovis2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592', 'return_dict': 'return_dict=True'
+}, model_name='Owlv2Model', library='transformers', import_path='transformers.models.owlv2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592', 'return_dict': 'return_dict=True'
+}, model_name='OwlViTModel', library='transformers', import_path='transformers.models.owlvit'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=256000', 'vocab_size': 'vocab_size=257152', 'projection_dim': 'projection_dim=2048', 'hidden_size': 'hidden_size=2048'
+}, model_name='PaliGemmaModel', library='transformers', import_path='transformers.models.paligemma'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=1025', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=True', 'encoder_config': 'encoder_config: Union[dict, transformers.models.parakeet.configuration_parakeet.ParakeetEncoderConfig
+ ] = None', 'pad_token_id': 'pad_token_id=1024'
+}, model_name='ParakeetForCTC', library='transformers', import_path='transformers.models.parakeet'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1024', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=8', 'intermediate_size': 'intermediate_size=4096', 'hidden_act': "hidden_act='silu'", 'attention_bias': 'attention_bias=True', 'convolution_bias': 'convolution_bias=True', 'conv_kernel_size': 'conv_kernel_size=9', 'subsampling_factor': 'subsampling_factor=8', 'subsampling_conv_channels': 'subsampling_conv_channels=256', 'num_mel_bins': 'num_mel_bins=80', 'subsampling_conv_kernel_size': 'subsampling_conv_kernel_size=3', 'subsampling_conv_stride': 'subsampling_conv_stride=2', 'dropout': 'dropout=0.1', 'dropout_positions': 'dropout_positions=0.0', 'layerdrop': 'layerdrop=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'max_position_embeddings': 'max_position_embeddings=5000', 'scale_input': 'scale_input=True', 'initializer_range': 'initializer_range=0.02'
+}, model_name='ParakeetEncoder', library='transformers', import_path='transformers.models.parakeet'), ModelAttributes(model=, model_type='model', model_parameters={'context_length': 'context_length: int = 32', 'patch_length': 'patch_length: int = 8', 'num_input_channels': 'num_input_channels: int = 1', 'patch_stride': 'patch_stride: int = 8', 'num_parallel_samples': 'num_parallel_samples: int = 100', 'd_model': 'd_model: int = 8', 'expansion_factor': 'expansion_factor: int = 2', 'num_layers': 'num_layers: int = 3', 'dropout': 'dropout: float = 0.2', 'mode': "mode: str = 'common_channel'", 'gated_attn': 'gated_attn: bool = True', 'norm_mlp': "norm_mlp: str = 'LayerNorm'", 'self_attn': 'self_attn: bool = False', 'self_attn_heads': 'self_attn_heads: int = 1', 'use_positional_encoding': 'use_positional_encoding: bool = False', 'positional_encoding_type': "positional_encoding_type: str = 'sincos'", 'scaling': "scaling: Union[str, bool, NoneType] = 'std'", 'loss': "loss: str = 'mse'", 'init_std': 'init_std: float = 0.02', 'post_init': 'post_init: bool = False', 'norm_eps': 'norm_eps: float = 1e-05', 'mask_type': "mask_type: str = 'random'", 'random_mask_ratio': 'random_mask_ratio: float = 0.5', 'num_forecast_mask_patches': 'num_forecast_mask_patches: Union[int, list[int
+ ], NoneType
+ ] = [
+ 2
+ ]', 'mask_value': 'mask_value: int = 0', 'masked_loss': 'masked_loss: bool = True', 'channel_consistent_masking': 'channel_consistent_masking: bool = True', 'unmasked_channel_indices': 'unmasked_channel_indices: Optional[list[int
+ ]
+ ] = None', 'head_dropout': 'head_dropout: float = 0.2', 'distribution_output': "distribution_output: str = 'student_t'", 'prediction_length': 'prediction_length: int = 16', 'prediction_channel_indices': 'prediction_channel_indices: Optional[list
+ ] = None', 'num_targets': 'num_targets: int = 3', 'output_range': 'output_range: Optional[list
+ ] = None', 'head_aggregation': "head_aggregation: str = 'max_pool'"
+}, model_name='PatchTSMixerModel', library='transformers', import_path='transformers.models.patchtsmixer'), ModelAttributes(model=, model_type='model', model_parameters={'num_input_channels': 'num_input_channels: int = 1', 'context_length': 'context_length: int = 32', 'distribution_output': "distribution_output: str = 'student_t'", 'loss': "loss: str = 'mse'", 'patch_length': 'patch_length: int = 1', 'patch_stride': 'patch_stride: int = 1', 'num_hidden_layers': 'num_hidden_layers: int = 3', 'd_model': 'd_model: int = 128', 'num_attention_heads': 'num_attention_heads: int = 4', 'share_embedding': 'share_embedding: bool = True', 'channel_attention': 'channel_attention: bool = False', 'ffn_dim': 'ffn_dim: int = 512', 'norm_type': "norm_type: str = 'batchnorm'", 'norm_eps': 'norm_eps: float = 1e-05', 'attention_dropout': 'attention_dropout: float = 0.0', 'positional_dropout': 'positional_dropout: float = 0.0', 'path_dropout': 'path_dropout: float = 0.0', 'ff_dropout': 'ff_dropout: float = 0.0', 'bias': 'bias: bool = True', 'activation_function': "activation_function: str = 'gelu'", 'pre_norm': 'pre_norm: bool = True', 'positional_encoding_type': "positional_encoding_type: str = 'sincos'", 'use_cls_token': 'use_cls_token: bool = False', 'init_std': 'init_std: float = 0.02', 'share_projection': 'share_projection: bool = True', 'scaling': "scaling: Union[str, bool, NoneType] = 'std'", 'do_mask_input': 'do_mask_input: Optional[bool
+ ] = None', 'mask_type': "mask_type: str = 'random'", 'random_mask_ratio': 'random_mask_ratio: float = 0.5', 'num_forecast_mask_patches': 'num_forecast_mask_patches: Union[int, list[int
+ ], NoneType
+ ] = [
+ 2
+ ]', 'channel_consistent_masking': 'channel_consistent_masking: Optional[bool
+ ] = False', 'unmasked_channel_indices': 'unmasked_channel_indices: Optional[list[int
+ ]
+ ] = None', 'mask_value': 'mask_value: int = 0', 'pooling_type': "pooling_type: str = 'mean'", 'head_dropout': 'head_dropout: float = 0.0', 'prediction_length': 'prediction_length: int = 24', 'num_targets': 'num_targets: int = 1', 'output_range': 'output_range: Optional[list
+ ] = None', 'num_parallel_samples': 'num_parallel_samples: int = 100'
+}, model_name='PatchTSTModel', library='transformers', import_path='transformers.models.patchtst'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'audio_config': 'audio_config=None'
+}, model_name='PeAudioModel', library='transformers', import_path='transformers.models.pe_audio'), ModelAttributes(model=, model_type='model', model_parameters={'dac_config': 'dac_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1792', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 4800', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 6', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 14', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 10000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'rope_parameters': "rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict, NoneType] = {'rope_theta': 20000}", 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='PeAudioEncoder', library='transformers', import_path='transformers.models.pe_audio'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'audio_video_config': 'audio_video_config=None'
+}, model_name='PeAudioVideoModel', library='transformers', import_path='transformers.models.pe_audio_video'), ModelAttributes(model=, model_type='model', model_parameters={'audio_config': 'audio_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'video_config': 'video_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1792', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 4800', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 6', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 14', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 10000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'rope_parameters': "rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict, NoneType] = {'rope_theta': 20000}", 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='PeAudioVideoEncoder', library='transformers', import_path='transformers.models.pe_audio_video'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'video_config': 'video_config=None'
+}, model_name='PeVideoModel', library='transformers', import_path='transformers.models.pe_video'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config: Union[dict, transformers.configuration_utils.PreTrainedConfig, NoneType
+ ] = None', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1792', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 4800', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 6', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 14', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 10000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-05', 'rope_parameters': "rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict, NoneType] = {'rope_theta': 20000}", 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='PeVideoEncoder', library='transformers', import_path='transformers.models.pe_video'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=12', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=0', 'scale_embedding': 'scale_embedding=False', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'forced_eos_token_id': 'forced_eos_token_id=1'
+}, model_name='PegasusModel', library='transformers', import_path='transformers.models.pegasus'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'pad_token': "pad_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'mask_token': "mask_token=''", 'mask_token_sent': "mask_token_sent=''", 'additional_special_tokens': 'additional_special_tokens=None', 'offset': 'offset=103'
+}, model_name='PegasusTokenizer', library='transformers', import_path='transformers.models.pegasus'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=96103', 'max_position_embeddings': 'max_position_embeddings=16384', 'encoder_layers': 'encoder_layers=16', 'encoder_ffn_dim': 'encoder_ffn_dim=4096', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=16', 'decoder_ffn_dim': 'decoder_ffn_dim=4096', 'decoder_attention_heads': 'decoder_attention_heads=16', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=1024', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=0', 'scale_embedding': 'scale_embedding=True', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'forced_eos_token_id': 'forced_eos_token_id=1', 'num_global_tokens': 'num_global_tokens=32', 'block_size': 'block_size=512', 'stagger_local_blocks': 'stagger_local_blocks=True'
+}, model_name='PegasusXModel', library='transformers', import_path='transformers.models.pegasus_x'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'pad_token': "pad_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'mask_token': "mask_token=''", 'mask_token_sent': "mask_token_sent=''", 'additional_special_tokens': 'additional_special_tokens=None', 'offset': 'offset=103'
+}, model_name='PegasusTokenizer', library='transformers', import_path='transformers.models.pegasus'), ModelAttributes(model=, model_type='model', model_parameters={'num_latents': 'num_latents=256', 'd_latents': 'd_latents=1280', 'd_model': 'd_model=768', 'num_blocks': 'num_blocks=1', 'num_self_attends_per_block': 'num_self_attends_per_block=26', 'num_self_attention_heads': 'num_self_attention_heads=8', 'num_cross_attention_heads': 'num_cross_attention_heads=8', 'qk_channels': 'qk_channels=None', 'v_channels': 'v_channels=None', 'cross_attention_shape_for_attention': "cross_attention_shape_for_attention='kv'", 'self_attention_widening_factor': 'self_attention_widening_factor=1', 'cross_attention_widening_factor': 'cross_attention_widening_factor=1', 'hidden_act': "hidden_act='gelu'", 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'use_query_residual': 'use_query_residual=True', 'vocab_size': 'vocab_size=262', 'max_position_embeddings': 'max_position_embeddings=2048', 'image_size': 'image_size=56', 'train_size': 'train_size=[
+ 368,
+ 496
+ ]', 'num_frames': 'num_frames=16', 'audio_samples_per_frame': 'audio_samples_per_frame=1920', 'samples_per_patch': 'samples_per_patch=16', 'output_shape': 'output_shape=[
+ 1,
+ 16,
+ 224,
+ 224
+ ]', 'output_num_channels': 'output_num_channels=512', '_label_trainable_num_channels': '_label_trainable_num_channels=1024'
+}, model_name='PerceiverModel', library='transformers', import_path='transformers.models.perceiver'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'pad_token': "pad_token='[PAD]'", 'bos_token': "bos_token='[BOS]'", 'eos_token': "eos_token='[EOS]'", 'mask_token': "mask_token='[MASK]'", 'cls_token': "cls_token='[CLS]'", 'sep_token': "sep_token='[SEP]'", 'model_max_length': 'model_max_length=2048'
+}, model_name='PerceiverTokenizer', library='transformers', import_path='transformers.models.perceiver'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'vision_use_cls_token': 'vision_use_cls_token=True', 'projector_pooling_ratio': 'projector_pooling_ratio=1', 'image_token_id': 'image_token_id=128002', 'video_token_id': 'video_token_id=128003'
+}, model_name='PerceptionLMModel', library='transformers', import_path='transformers.models.perception_lm'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 262144', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 16384', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 36', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'hidden_act': "hidden_act: Optional[str] = 'relu2'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 16384', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'qk_layernorm': 'qk_layernorm: Optional[bool
+ ] = True', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2'
+}, model_name='PersimmonModel', library='transformers', import_path='transformers.models.persimmon'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 51200', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 8192', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'resid_pdrop': 'resid_pdrop: Optional[float
+ ] = 0.0', 'embd_pdrop': 'embd_pdrop: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'hidden_act': "hidden_act: Optional[str] = 'gelu_new'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 2048', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'qk_layernorm': 'qk_layernorm: Optional[bool
+ ] = False', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2'
+}, model_name='PhiModel', library='transformers', import_path='transformers.models.phi'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 3072', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 8192', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'resid_pdrop': 'resid_pdrop: Optional[float
+ ] = 0.0', 'embd_pdrop': 'embd_pdrop: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'original_max_position_embeddings': 'original_max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 32000', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 32000', 'sliding_window': 'sliding_window: Optional[int
+ ] = None'
+}, model_name='Phi3Model', library='transformers', import_path='transformers.models.phi3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 200064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 3072', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 8192', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'resid_pdrop': 'resid_pdrop: Optional[float
+ ] = 0.0', 'embd_pdrop': 'embd_pdrop: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 199999', 'eos_token_id': 'eos_token_id: Optional[list[int
+ ]
+ ] = [
+ 199999,
+ 200020
+ ]', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 199999', 'original_max_position_embeddings': 'original_max_position_embeddings: Optional[int
+ ] = 4096', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'vision_config': 'vision_config: Optional[dict
+ ] = None', 'audio_config': 'audio_config: Optional[dict
+ ] = None'
+}, model_name='Phi4MultimodalModel', library='transformers', import_path='transformers.models.phi4_multimodal'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 32064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 6400', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 131072', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[int
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 2', 'num_local_experts': 'num_local_experts: Optional[int
+ ] = 16', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'router_jitter_noise': 'router_jitter_noise: Optional[float
+ ] = 0.01', 'input_jitter_noise': 'input_jitter_noise: Optional[float
+ ] = 0.0', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'lm_head_bias': 'lm_head_bias: Optional[bool
+ ] = False'
+}, model_name='PhimoeModel', library='transformers', import_path='transformers.models.phimoe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1280', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=16', 'mlp_ratio': 'mlp_ratio=4', 'n_cls_tokens': 'n_cls_tokens=8', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=256', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'drop_path_rate': 'drop_path_rate=0.0', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None', 'apply_layernorm': 'apply_layernorm=True', 'reshape_hidden_states': 'reshape_hidden_states=True'
+}, model_name='PixioModel', library='transformers', import_path='transformers.models.pixio'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size: Optional[int
+ ] = 1024', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 4096', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_channels': 'num_channels: Optional[int
+ ] = 3', 'image_size': 'image_size: Optional[int
+ ] = 1024', 'patch_size': 'patch_size: Optional[int
+ ] = 16', 'hidden_act': "hidden_act: Optional[str] = 'gelu'", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02'
+}, model_name='PixtralVisionModel', library='transformers', import_path='transformers.models.pixtral'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50005', 'max_position_embeddings': 'max_position_embeddings=1024', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=3072', 'encoder_attention_heads': 'encoder_attention_heads=12', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=3072', 'decoder_attention_heads': 'decoder_attention_heads=12', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=768', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'classifier_dropout': 'classifier_dropout=0.0', 'scale_embedding': 'scale_embedding=True', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'forced_eos_token_id': 'forced_eos_token_id=2'
+}, model_name='PLBartModel', library='transformers', import_path='transformers.models.plbart'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'mask_token': "mask_token=''", 'language_codes': "language_codes='base'", 'src_lang': 'src_lang=None', 'tgt_lang': 'tgt_lang=None', 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None', 'additional_special_tokens': 'additional_special_tokens=None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=True'
+}, model_name='PLBartTokenizer', library='transformers', import_path='transformers.models.plbart'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'patch_size': 'patch_size=16', 'stride': 'stride=16', 'pool_size': 'pool_size=3', 'mlp_ratio': 'mlp_ratio=4.0', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'hidden_sizes': 'hidden_sizes=[
+ 64,
+ 128,
+ 320,
+ 512
+ ]', 'patch_sizes': 'patch_sizes=[
+ 7,
+ 3,
+ 3,
+ 3
+ ]', 'strides': 'strides=[
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'padding': 'padding=[
+ 2,
+ 1,
+ 1,
+ 1
+ ]', 'num_encoder_blocks': 'num_encoder_blocks=4', 'drop_path_rate': 'drop_path_rate=0.0', 'hidden_act': "hidden_act='gelu'", 'use_layer_scale': 'use_layer_scale=True', 'layer_scale_init_value': 'layer_scale_init_value=1e-05', 'initializer_range': 'initializer_range=0.02'
+}, model_name='PoolFormerModel', library='transformers', import_path='transformers.models.poolformer'), ModelAttributes(model=, model_type='model', model_parameters={'activation_dropout': 'activation_dropout: Optional[float
+ ] = 0.1', 'activation_function': "activation_function: Union[str, collections.abc.Callable, NoneType] = 'gelu'", 'vocab_size': 'vocab_size: Optional[int
+ ] = 30522', 'hidden_size': 'hidden_size: Optional[int
+ ] = 1024', 'encoder_ffn_dim': 'encoder_ffn_dim: Optional[int
+ ] = 4096', 'num_encoder_layers': 'num_encoder_layers: Optional[int
+ ] = 12', 'num_encoder_attention_heads': 'num_encoder_attention_heads: Optional[int
+ ] = 16', 'decoder_ffn_dim': 'decoder_ffn_dim: Optional[int
+ ] = 4096', 'num_decoder_layers': 'num_decoder_layers: Optional[int
+ ] = 12', 'num_decoder_attention_heads': 'num_decoder_attention_heads: Optional[int
+ ] = 16', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.1', 'dropout': 'dropout: Optional[float
+ ] = 0.1', 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 512', 'init_std': 'init_std: Optional[float
+ ] = 0.02', 'is_encoder_decoder': 'is_encoder_decoder: Optional[bool
+ ] = True', 'add_cross_attention': 'add_cross_attention: Optional[bool
+ ] = True', 'decoder_start_token_id': 'decoder_start_token_id: Optional[int
+ ] = 0', 'ngram': 'ngram: Optional[int
+ ] = 2', 'num_buckets': 'num_buckets: Optional[int
+ ] = 32', 'relative_max_distance': 'relative_max_distance: Optional[int
+ ] = 128', 'disable_ngram_loss': 'disable_ngram_loss: Optional[bool
+ ] = False', 'eps': 'eps: Optional[float
+ ] = 0.0', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 1', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2'
+}, model_name='ProphetNetModel', library='transformers', import_path='transformers.models.prophetnet'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file: str', 'do_lower_case': 'do_lower_case: Optional[bool
+ ] = True', 'do_basic_tokenize': 'do_basic_tokenize: Optional[bool
+ ] = True', 'never_split': 'never_split: Optional[collections.abc.Iterable
+ ] = None', 'unk_token': "unk_token: Optional[str] = '[UNK]'", 'sep_token': "sep_token: Optional[str] = '[SEP]'", 'x_sep_token': "x_sep_token: Optional[str] = '[X_SEP]'", 'pad_token': "pad_token: Optional[str] = '[PAD]'", 'mask_token': "mask_token: Optional[str] = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: Optional[bool
+ ] = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces: bool = True'
+}, model_name='ProphetNetTokenizer', library='transformers', import_path='transformers.models.prophetnet'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size: int = 224', 'num_channels': 'num_channels: int = 3', 'num_encoder_blocks': 'num_encoder_blocks: int = 4', 'depths': 'depths: list[int
+ ] = [
+ 2,
+ 2,
+ 2,
+ 2
+ ]', 'sequence_reduction_ratios': 'sequence_reduction_ratios: list[int
+ ] = [
+ 8,
+ 4,
+ 2,
+ 1
+ ]', 'hidden_sizes': 'hidden_sizes: list[int
+ ] = [
+ 64,
+ 128,
+ 320,
+ 512
+ ]', 'patch_sizes': 'patch_sizes: list[int
+ ] = [
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'strides': 'strides: list[int
+ ] = [
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'num_attention_heads': 'num_attention_heads: list[int
+ ] = [
+ 1,
+ 2,
+ 5,
+ 8
+ ]', 'mlp_ratios': 'mlp_ratios: list[int
+ ] = [
+ 8,
+ 8,
+ 4,
+ 4
+ ]', 'hidden_act': "hidden_act: collections.abc.Mapping[str, collections.abc.Callable] = 'gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob: float = 0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob: float = 0.0', 'initializer_range': 'initializer_range: float = 0.02', 'drop_path_rate': 'drop_path_rate: float = 0.0', 'layer_norm_eps': 'layer_norm_eps: float = 1e-06', 'qkv_bias': 'qkv_bias: bool = True', 'num_labels': 'num_labels: int = 1000'
+}, model_name='PvtModel', library='transformers', import_path='transformers.models.pvt'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size: Union[int, tuple[int, int
+ ]
+ ] = 224', 'num_channels': 'num_channels: int = 3', 'num_encoder_blocks': 'num_encoder_blocks: int = 4', 'depths': 'depths: list[int
+ ] = [
+ 2,
+ 2,
+ 2,
+ 2
+ ]', 'sr_ratios': 'sr_ratios: list[int
+ ] = [
+ 8,
+ 4,
+ 2,
+ 1
+ ]', 'hidden_sizes': 'hidden_sizes: list[int
+ ] = [
+ 32,
+ 64,
+ 160,
+ 256
+ ]', 'patch_sizes': 'patch_sizes: list[int
+ ] = [
+ 7,
+ 3,
+ 3,
+ 3
+ ]', 'strides': 'strides: list[int
+ ] = [
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'num_attention_heads': 'num_attention_heads: list[int
+ ] = [
+ 1,
+ 2,
+ 5,
+ 8
+ ]', 'mlp_ratios': 'mlp_ratios: list[int
+ ] = [
+ 8,
+ 8,
+ 4,
+ 4
+ ]', 'hidden_act': "hidden_act: Union[str, collections.abc.Callable] = 'gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob: float = 0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob: float = 0.0', 'initializer_range': 'initializer_range: float = 0.02', 'drop_path_rate': 'drop_path_rate: float = 0.0', 'layer_norm_eps': 'layer_norm_eps: float = 1e-06', 'qkv_bias': 'qkv_bias: bool = True', 'linear_attention': 'linear_attention: bool = False', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='PvtV2Model', library='transformers', import_path='transformers.models.pvt_v2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 22016', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 28', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='Qwen2Model', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151655', 'video_token_id': 'video_token_id=151656', 'vision_start_token_id': 'vision_start_token_id=151652', 'vision_end_token_id': 'vision_end_token_id=151653'
+}, model_name='Qwen2_5_VLModel', library='transformers', import_path='transformers.models.qwen2_5_vl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 152064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 8192', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 29568', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 80', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 80', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 151643', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 151645', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None'
+}, model_name='Qwen2_5_VLTextModel', library='transformers', import_path='transformers.models.qwen2_5_vl'), ModelAttributes(model=, model_type='model', model_parameters={'num_mel_bins': 'num_mel_bins=128', 'encoder_layers': 'encoder_layers=32', 'encoder_attention_heads': 'encoder_attention_heads=20', 'encoder_ffn_dim': 'encoder_ffn_dim=5120', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'd_model': 'd_model=1280', 'dropout': 'dropout=0.0', 'attention_dropout': 'attention_dropout=0.0', 'activation_function': "activation_function='gelu'", 'activation_dropout': 'activation_dropout=0.0', 'scale_embedding': 'scale_embedding=False', 'initializer_range': 'initializer_range=0.02', 'max_source_positions': 'max_source_positions=1500'
+}, model_name='Qwen2AudioEncoder', library='transformers', import_path='transformers.models.qwen2_audio'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 5632', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 16', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 28', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'decoder_sparse_step': 'decoder_sparse_step: Optional[int
+ ] = 1', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1408', 'shared_expert_intermediate_size': 'shared_expert_intermediate_size: Optional[int
+ ] = 5632', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 4', 'num_experts': 'num_experts: Optional[int
+ ] = 60', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'mlp_only_layers': 'mlp_only_layers: Optional[bool
+ ] = None', 'qkv_bias': 'qkv_bias: Optional[bool
+ ] = True', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Qwen2MoeModel', library='transformers', import_path='transformers.models.qwen2_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151655', 'video_token_id': 'video_token_id=151656', 'vision_start_token_id': 'vision_start_token_id=151652', 'vision_end_token_id': 'vision_end_token_id=151653'
+}, model_name='Qwen2VLModel', library='transformers', import_path='transformers.models.qwen2_vl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 152064', 'hidden_size': 'hidden_size: Optional[int
+ ] = 8192', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 29568', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 80', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 64', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 80', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 151643', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 151645', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = None'
+}, model_name='Qwen2VLTextModel', library='transformers', import_path='transformers.models.qwen2_vl'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 22016', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'max_window_layers': 'max_window_layers: Optional[int
+ ] = 28', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='Qwen3Model', library='transformers', import_path='transformers.models.qwen3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 6144', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'decoder_sparse_step': 'decoder_sparse_step: Optional[int
+ ] = 1', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 768', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 8', 'num_experts': 'num_experts: Optional[int
+ ] = 128', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = False', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'mlp_only_layers': 'mlp_only_layers: Optional[bool
+ ] = None'
+}, model_name='Qwen3MoeModel', library='transformers', import_path='transformers.models.qwen3_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 5632', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 48', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'head_dim': 'head_dim: Optional[int
+ ] = 256', 'linear_conv_kernel_dim': 'linear_conv_kernel_dim: Optional[int
+ ] = 4', 'linear_key_head_dim': 'linear_key_head_dim: Optional[int
+ ] = 128', 'linear_value_head_dim': 'linear_value_head_dim: Optional[int
+ ] = 128', 'linear_num_key_heads': 'linear_num_key_heads: Optional[int
+ ] = 16', 'linear_num_value_heads': 'linear_num_value_heads: Optional[int
+ ] = 32', 'decoder_sparse_step': 'decoder_sparse_step: Optional[int
+ ] = 1', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 512', 'shared_expert_intermediate_size': 'shared_expert_intermediate_size: Optional[int
+ ] = 512', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 10', 'num_experts': 'num_experts: Optional[int
+ ] = 512', 'norm_topk_prob': 'norm_topk_prob: Optional[bool
+ ] = True', 'output_router_logits': 'output_router_logits: Optional[bool
+ ] = False', 'router_aux_loss_coef': 'router_aux_loss_coef: Optional[float
+ ] = 0.001', 'mlp_only_layers': 'mlp_only_layers: Optional[list[int
+ ]
+ ] = []', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None'
+}, model_name='Qwen3NextModel', library='transformers', import_path='transformers.models.qwen3_next'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151655', 'video_token_id': 'video_token_id=151656', 'vision_start_token_id': 'vision_start_token_id=151652', 'vision_end_token_id': 'vision_end_token_id=151653', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='Qwen3VLModel', library='transformers', import_path='transformers.models.qwen3_vl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151655', 'video_token_id': 'video_token_id=151656', 'vision_start_token_id': 'vision_start_token_id=151652', 'vision_end_token_id': 'vision_end_token_id=151653', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='Qwen3VLMoeModel', library='transformers', import_path='transformers.models.qwen3_vl_moe'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'vocab_file': 'vocab_file=None', 'merges_file': 'merges_file=None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': 'bos_token=None', 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='Qwen2Tokenizer', library='transformers', import_path='transformers.models.qwen2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 5632', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 24', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 16', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 128000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'decoder_sparse_step': 'decoder_sparse_step: Optional[int
+ ] = 1', 'moe_intermediate_size': 'moe_intermediate_size: Optional[int
+ ] = 1408', 'num_experts_per_tok': 'num_experts_per_tok: Optional[int
+ ] = 4', 'num_experts': 'num_experts: Optional[int
+ ] = 60', 'mlp_only_layers': 'mlp_only_layers: Optional[list[int
+ ]
+ ] = None', 'rope_parameters': 'rope_parameters: Optional[transformers.modeling_rope_utils.RopeParameters
+ ] = None', 'head_dim': 'head_dim: Optional[int
+ ] = None'
+}, model_name='Qwen3VLMoeTextModel', library='transformers', import_path='transformers.models.qwen3_vl_moe'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 151936', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 22016', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'head_dim': 'head_dim: Optional[int
+ ] = 128', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 128000', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0'
+}, model_name='Qwen3VLTextModel', library='transformers', import_path='transformers.models.qwen3_vl'), ModelAttributes(model=, model_type='model', model_parameters={'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 26', 'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 7680', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 10', 'lru_width': 'lru_width: Optional[int
+ ] = None', 'attention_window_size': 'attention_window_size: Optional[int
+ ] = 2048', 'conv1d_width': 'conv1d_width: Optional[int
+ ] = 4', 'logits_soft_cap': 'logits_soft_cap: Optional[float
+ ] = 30.0', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu_pytorch_tanh'", 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'block_types': "block_types: Optional[list[str]] = ('recurrent', 'recurrent', 'attention')", 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = None', 'attention_bias': 'attention_bias: Optional[str
+ ] = False', 'w_init_variance_scale': 'w_init_variance_scale: Optional[float
+ ] = 0.01'
+}, model_name='RecurrentGemmaModel', library='transformers', import_path='transformers.models.recurrent_gemma'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'attention_head_size': 'attention_head_size=64', 'attn_layers': "attn_layers=['local', 'lsh', 'local', 'lsh', 'local', 'lsh']", 'axial_norm_std': 'axial_norm_std=1.0', 'axial_pos_embds': 'axial_pos_embds=True', 'axial_pos_shape': 'axial_pos_shape=[
+ 64,
+ 64
+ ]', 'axial_pos_embds_dim': 'axial_pos_embds_dim=[
+ 64,
+ 192
+ ]', 'chunk_size_lm_head': 'chunk_size_lm_head=0', 'eos_token_id': 'eos_token_id=2', 'feed_forward_size': 'feed_forward_size=512', 'hash_seed': 'hash_seed=None', 'hidden_act': "hidden_act='relu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.05', 'hidden_size': 'hidden_size=256', 'initializer_range': 'initializer_range=0.02', 'is_decoder': 'is_decoder=False', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'local_num_chunks_before': 'local_num_chunks_before=1', 'local_num_chunks_after': 'local_num_chunks_after=0', 'local_attention_probs_dropout_prob': 'local_attention_probs_dropout_prob=0.05', 'local_attn_chunk_length': 'local_attn_chunk_length=64', 'lsh_attn_chunk_length': 'lsh_attn_chunk_length=64', 'lsh_attention_probs_dropout_prob': 'lsh_attention_probs_dropout_prob=0.0', 'lsh_num_chunks_before': 'lsh_num_chunks_before=1', 'lsh_num_chunks_after': 'lsh_num_chunks_after=0', 'max_position_embeddings': 'max_position_embeddings=4096', 'num_attention_heads': 'num_attention_heads=12', 'num_buckets': 'num_buckets=None', 'num_hashes': 'num_hashes=1', 'pad_token_id': 'pad_token_id=0', 'vocab_size': 'vocab_size=320', 'tie_word_embeddings': 'tie_word_embeddings=False', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='ReformerModel', library='transformers', import_path='transformers.models.reformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'eos_token': "eos_token: str = ''", 'unk_token': "unk_token: str = ''", 'additional_special_tokens': 'additional_special_tokens: Optional[list
+ ] = None'
+}, model_name='ReformerTokenizer', library='transformers', import_path='transformers.models.reformer'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'embedding_size': 'embedding_size=32', 'hidden_sizes': 'hidden_sizes=[
+ 128,
+ 192,
+ 512,
+ 1088
+ ]', 'depths': 'depths=[
+ 2,
+ 6,
+ 12,
+ 2
+ ]', 'groups_width': 'groups_width=64', 'layer_type': "layer_type='y'", 'hidden_act': "hidden_act='relu'"
+}, model_name='RegNetModel', library='transformers', import_path='transformers.models.regnet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=250300', 'hidden_size': 'hidden_size=1152', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=18', 'input_embedding_size': 'input_embedding_size=256', 'output_embedding_size': 'output_embedding_size=1664', 'intermediate_size': 'intermediate_size=4608', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'classifier_dropout_prob': 'classifier_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=312', 'eos_token_id': 'eos_token_id=313'
+}, model_name='RemBertModel', library='transformers', import_path='transformers.models.rembert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'keep_accents': 'keep_accents: bool = False', 'bos_token': "bos_token: str = '[CLS]'", 'eos_token': "eos_token: str = '[SEP]'", 'unk_token': "unk_token: str = ''", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = ''", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'add_prefix_space': 'add_prefix_space: bool = True', 'remove_space': 'remove_space: bool = True'
+}, model_name='RemBertTokenizer', library='transformers', import_path='transformers.models.rembert'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'embedding_size': 'embedding_size=64', 'hidden_sizes': 'hidden_sizes=[
+ 256,
+ 512,
+ 1024,
+ 2048
+ ]', 'depths': 'depths=[
+ 3,
+ 4,
+ 6,
+ 3
+ ]', 'layer_type': "layer_type='bottleneck'", 'hidden_act': "hidden_act='relu'", 'downsample_in_first_stage': 'downsample_in_first_stage=False', 'downsample_in_bottleneck': 'downsample_in_bottleneck=False', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='ResNetModel', library='transformers', import_path='transformers.models.resnet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='RobertaModel', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50265', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='RobertaPreLayerNormModel', library='transformers', import_path='transformers.models.roberta_prelayernorm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='RobertaTokenizer', library='transformers', import_path='transformers.models.roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'classifier_dropout': 'classifier_dropout=None', 'enable_pronunciation': 'enable_pronunciation=True', 'enable_shape': 'enable_shape=True', 'pronunciation_embed_dim': 'pronunciation_embed_dim=768', 'pronunciation_vocab_size': 'pronunciation_vocab_size=910', 'shape_embed_dim': 'shape_embed_dim=512', 'shape_vocab_size': 'shape_vocab_size=24858', 'concat_input': 'concat_input=True'
+}, model_name='RoCBertModel', library='transformers', import_path='transformers.models.roc_bert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'word_shape_file': 'word_shape_file', 'word_pronunciation_file': 'word_pronunciation_file', 'do_lower_case': 'do_lower_case=True', 'do_basic_tokenize': 'do_basic_tokenize=True', 'never_split': 'never_split=None', 'unk_token': "unk_token='[UNK]'", 'sep_token': "sep_token='[SEP]'", 'pad_token': "pad_token='[PAD]'", 'cls_token': "cls_token='[CLS]'", 'mask_token': "mask_token='[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars=True', 'strip_accents': 'strip_accents=None'
+}, model_name='RoCBertTokenizer', library='transformers', import_path='transformers.models.roc_bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50000', 'embedding_size': 'embedding_size=None', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=1536', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'rotary_value': 'rotary_value=False'
+}, model_name='RoFormerModel', library='transformers', import_path='transformers.models.roformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Optional[dict[str, int
+ ]
+ ] = None', 'do_lower_case': 'do_lower_case=True', 'unk_token': "unk_token='[UNK]'", 'sep_token': "sep_token='[SEP]'", 'pad_token': "pad_token='[PAD]'", 'cls_token': "cls_token='[CLS]'", 'mask_token': "mask_token='[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars=True', 'strip_accents': 'strip_accents=None'
+}, model_name='RoFormerTokenizer', library='transformers', import_path='transformers.models.roformer'), ModelAttributes(model=, model_type='model', model_parameters={'initializer_range': 'initializer_range=0.01', 'initializer_bias_prior_prob': 'initializer_bias_prior_prob=None', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'batch_norm_eps': 'batch_norm_eps=1e-05', 'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'freeze_backbone_batch_norms': 'freeze_backbone_batch_norms=True', 'backbone_kwargs': 'backbone_kwargs=None', 'encoder_hidden_dim': 'encoder_hidden_dim=256', 'encoder_in_channels': 'encoder_in_channels=[
+ 512,
+ 1024,
+ 2048
+ ]', 'feat_strides': 'feat_strides=[
+ 8,
+ 16,
+ 32
+ ]', 'encoder_layers': 'encoder_layers=1', 'encoder_ffn_dim': 'encoder_ffn_dim=1024', 'encoder_attention_heads': 'encoder_attention_heads=8', 'dropout': 'dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'encode_proj_layers': 'encode_proj_layers=[
+ 2
+ ]', 'positional_encoding_temperature': 'positional_encoding_temperature=10000', 'encoder_activation_function': "encoder_activation_function='gelu'", 'activation_function': "activation_function='silu'", 'eval_size': 'eval_size=None', 'normalize_before': 'normalize_before=False', 'hidden_expansion': 'hidden_expansion=1.0', 'd_model': 'd_model=256', 'num_queries': 'num_queries=300', 'decoder_in_channels': 'decoder_in_channels=[
+ 256,
+ 256,
+ 256
+ ]', 'decoder_ffn_dim': 'decoder_ffn_dim=1024', 'num_feature_levels': 'num_feature_levels=3', 'decoder_n_points': 'decoder_n_points=4', 'decoder_layers': 'decoder_layers=6', 'decoder_attention_heads': 'decoder_attention_heads=8', 'decoder_activation_function': "decoder_activation_function='relu'", 'attention_dropout': 'attention_dropout=0.0', 'num_denoising': 'num_denoising=100', 'label_noise_ratio': 'label_noise_ratio=0.5', 'box_noise_scale': 'box_noise_scale=1.0', 'learn_initial_query': 'learn_initial_query=False', 'anchor_image_size': 'anchor_image_size=None', 'disable_custom_kernels': 'disable_custom_kernels=True', 'with_box_refine': 'with_box_refine=True', 'is_encoder_decoder': 'is_encoder_decoder=True', 'matcher_alpha': 'matcher_alpha=0.25', 'matcher_gamma': 'matcher_gamma=2.0', 'matcher_class_cost': 'matcher_class_cost=2.0', 'matcher_bbox_cost': 'matcher_bbox_cost=5.0', 'matcher_giou_cost': 'matcher_giou_cost=2.0', 'use_focal_loss': 'use_focal_loss=True', 'auxiliary_loss': 'auxiliary_loss=True', 'focal_loss_alpha': 'focal_loss_alpha=0.75', 'focal_loss_gamma': 'focal_loss_gamma=2.0', 'weight_loss_vfl': 'weight_loss_vfl=1.0', 'weight_loss_bbox': 'weight_loss_bbox=5.0', 'weight_loss_giou': 'weight_loss_giou=2.0', 'eos_coefficient': 'eos_coefficient=0.0001'
+}, model_name='RTDetrModel', library='transformers', import_path='transformers.models.rt_detr'), ModelAttributes(model=, model_type='model', model_parameters={'initializer_range': 'initializer_range=0.01', 'initializer_bias_prior_prob': 'initializer_bias_prior_prob=None', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'batch_norm_eps': 'batch_norm_eps=1e-05', 'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'freeze_backbone_batch_norms': 'freeze_backbone_batch_norms=True', 'backbone_kwargs': 'backbone_kwargs=None', 'encoder_hidden_dim': 'encoder_hidden_dim=256', 'encoder_in_channels': 'encoder_in_channels=[
+ 512,
+ 1024,
+ 2048
+ ]', 'feat_strides': 'feat_strides=[
+ 8,
+ 16,
+ 32
+ ]', 'encoder_layers': 'encoder_layers=1', 'encoder_ffn_dim': 'encoder_ffn_dim=1024', 'encoder_attention_heads': 'encoder_attention_heads=8', 'dropout': 'dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'encode_proj_layers': 'encode_proj_layers=[
+ 2
+ ]', 'positional_encoding_temperature': 'positional_encoding_temperature=10000', 'encoder_activation_function': "encoder_activation_function='gelu'", 'activation_function': "activation_function='silu'", 'eval_size': 'eval_size=None', 'normalize_before': 'normalize_before=False', 'hidden_expansion': 'hidden_expansion=1.0', 'd_model': 'd_model=256', 'num_queries': 'num_queries=300', 'decoder_in_channels': 'decoder_in_channels=[
+ 256,
+ 256,
+ 256
+ ]', 'decoder_ffn_dim': 'decoder_ffn_dim=1024', 'num_feature_levels': 'num_feature_levels=3', 'decoder_n_points': 'decoder_n_points=4', 'decoder_layers': 'decoder_layers=6', 'decoder_attention_heads': 'decoder_attention_heads=8', 'decoder_activation_function': "decoder_activation_function='relu'", 'attention_dropout': 'attention_dropout=0.0', 'num_denoising': 'num_denoising=100', 'label_noise_ratio': 'label_noise_ratio=0.5', 'box_noise_scale': 'box_noise_scale=1.0', 'learn_initial_query': 'learn_initial_query=False', 'anchor_image_size': 'anchor_image_size=None', 'with_box_refine': 'with_box_refine=True', 'is_encoder_decoder': 'is_encoder_decoder=True', 'matcher_alpha': 'matcher_alpha=0.25', 'matcher_gamma': 'matcher_gamma=2.0', 'matcher_class_cost': 'matcher_class_cost=2.0', 'matcher_bbox_cost': 'matcher_bbox_cost=5.0', 'matcher_giou_cost': 'matcher_giou_cost=2.0', 'use_focal_loss': 'use_focal_loss=True', 'auxiliary_loss': 'auxiliary_loss=True', 'focal_loss_alpha': 'focal_loss_alpha=0.75', 'focal_loss_gamma': 'focal_loss_gamma=2.0', 'weight_loss_vfl': 'weight_loss_vfl=1.0', 'weight_loss_bbox': 'weight_loss_bbox=5.0', 'weight_loss_giou': 'weight_loss_giou=2.0', 'eos_coefficient': 'eos_coefficient=0.0001', 'decoder_n_levels': 'decoder_n_levels=3', 'decoder_offset_scale': 'decoder_offset_scale=0.5', 'decoder_method': "decoder_method='default'"
+}, model_name='RTDetrV2Model', library='transformers', import_path='transformers.models.rt_detr_v2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=50277', 'context_length': 'context_length=1024', 'hidden_size': 'hidden_size=4096', 'num_hidden_layers': 'num_hidden_layers=32', 'attention_hidden_size': 'attention_hidden_size=None', 'intermediate_size': 'intermediate_size=None', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-05', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=0', 'rescale_every': 'rescale_every=6', 'tie_word_embeddings': 'tie_word_embeddings=False'
+}, model_name='RwkvModel', library='transformers', import_path='transformers.models.rwkv'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='SamModel', library='transformers', import_path='transformers.models.sam'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam2Model', library='transformers', import_path='transformers.models.sam2'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=96', 'num_attention_heads': 'num_attention_heads=1', 'num_channels': 'num_channels=3', 'image_size': 'image_size=None', 'patch_kernel_size': 'patch_kernel_size=None', 'patch_stride': 'patch_stride=None', 'patch_padding': 'patch_padding=None', 'query_stride': 'query_stride=None', 'window_positional_embedding_background_size': 'window_positional_embedding_background_size=None', 'num_query_pool_stages': 'num_query_pool_stages=3', 'blocks_per_stage': 'blocks_per_stage=None', 'embed_dim_per_stage': 'embed_dim_per_stage=None', 'num_attention_heads_per_stage': 'num_attention_heads_per_stage=None', 'window_size_per_stage': 'window_size_per_stage=None', 'global_attention_blocks': 'global_attention_blocks=None', 'mlp_ratio': 'mlp_ratio=4.0', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam2HieraDetModel', library='transformers', import_path='transformers.models.sam2'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02', 'num_maskmem': 'num_maskmem=7', 'image_size': 'image_size=1024', 'sigmoid_scale_for_mem_enc': 'sigmoid_scale_for_mem_enc=20.0', 'sigmoid_bias_for_mem_enc': 'sigmoid_bias_for_mem_enc=-10.0', 'enable_occlusion_spatial_embedding': 'enable_occlusion_spatial_embedding=True', 'multimask_output_in_sam': 'multimask_output_in_sam=True', 'multimask_min_pt_num': 'multimask_min_pt_num=0', 'multimask_max_pt_num': 'multimask_max_pt_num=1', 'multimask_output_for_tracking': 'multimask_output_for_tracking=True', 'max_object_pointers_in_encoder': 'max_object_pointers_in_encoder=16', 'max_cond_frame_num': 'max_cond_frame_num=-1', 'enable_temporal_pos_encoding_for_object_pointers': 'enable_temporal_pos_encoding_for_object_pointers=True', 'memory_attention_hidden_size': 'memory_attention_hidden_size=256', 'memory_attention_num_layers': 'memory_attention_num_layers=4', 'memory_attention_num_attention_heads': 'memory_attention_num_attention_heads=1', 'memory_attention_downsample_rate': 'memory_attention_downsample_rate=1', 'memory_attention_feed_forward_hidden_size': 'memory_attention_feed_forward_hidden_size=2048', 'memory_attention_feed_forward_hidden_act': "memory_attention_feed_forward_hidden_act='relu'", 'memory_attention_dropout': 'memory_attention_dropout=0.1', 'memory_attention_rope_theta': 'memory_attention_rope_theta=10000', 'memory_attention_rope_feat_sizes': 'memory_attention_rope_feat_sizes=None', 'memory_attention_rope_dropout': 'memory_attention_rope_dropout=0.1', 'memory_encoder_hidden_size': 'memory_encoder_hidden_size=256', 'memory_encoder_output_channels': 'memory_encoder_output_channels=64', 
'mask_downsampler_embed_dim': 'mask_downsampler_embed_dim=256', 'mask_downsampler_kernel_size': 'mask_downsampler_kernel_size=3', 'mask_downsampler_stride': 'mask_downsampler_stride=2', 'mask_downsampler_padding': 'mask_downsampler_padding=1', 'mask_downsampler_total_stride': 'mask_downsampler_total_stride=16', 'mask_downsampler_hidden_act': "mask_downsampler_hidden_act='gelu'", 'memory_fuser_num_layers': 'memory_fuser_num_layers=2', 'memory_fuser_embed_dim': 'memory_fuser_embed_dim=256', 'memory_fuser_intermediate_dim': 'memory_fuser_intermediate_dim=1024', 'memory_fuser_kernel_size': 'memory_fuser_kernel_size=7', 'memory_fuser_padding': 'memory_fuser_padding=3', 'memory_fuser_layer_scale_init_value': 'memory_fuser_layer_scale_init_value=1e-06', 'memory_fuser_hidden_act': "memory_fuser_hidden_act='gelu'"
+}, model_name='Sam2VideoModel', library='transformers', import_path='transformers.models.sam2_video'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'backbone_channel_list': 'backbone_channel_list=None', 'backbone_feature_sizes': 'backbone_feature_sizes=None', 'fpn_hidden_size': 'fpn_hidden_size=256', 'fpn_kernel_size': 'fpn_kernel_size=1', 'fpn_stride': 'fpn_stride=1', 'fpn_padding': 'fpn_padding=0', 'fpn_top_down_levels': 'fpn_top_down_levels=None', 'num_feature_levels': 'num_feature_levels=3', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam2VisionModel', library='transformers', import_path='transformers.models.sam2'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'geometry_encoder_config': 'geometry_encoder_config=None', 'detr_encoder_config': 'detr_encoder_config=None', 'detr_decoder_config': 'detr_decoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam3Model', library='transformers', import_path='transformers.models.sam3'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam3TrackerModel', library='transformers', import_path='transformers.models.sam3_tracker'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02', 'num_maskmem': 'num_maskmem=7', 'image_size': 'image_size=1008', 'sigmoid_scale_for_mem_enc': 'sigmoid_scale_for_mem_enc=20.0', 'sigmoid_bias_for_mem_enc': 'sigmoid_bias_for_mem_enc=-10.0', 'enable_occlusion_spatial_embedding': 'enable_occlusion_spatial_embedding=True', 'multimask_output_in_sam': 'multimask_output_in_sam=True', 'multimask_min_pt_num': 'multimask_min_pt_num=0', 'multimask_max_pt_num': 'multimask_max_pt_num=1', 'multimask_output_for_tracking': 'multimask_output_for_tracking=True', 'max_object_pointers_in_encoder': 'max_object_pointers_in_encoder=16', 'max_cond_frame_num': 'max_cond_frame_num=4', 'enable_temporal_pos_encoding_for_object_pointers': 'enable_temporal_pos_encoding_for_object_pointers=True', 'memory_attention_hidden_size': 'memory_attention_hidden_size=256', 'memory_attention_num_layers': 'memory_attention_num_layers=4', 'memory_attention_num_attention_heads': 'memory_attention_num_attention_heads=1', 'memory_attention_downsample_rate': 'memory_attention_downsample_rate=1', 'memory_attention_feed_forward_hidden_size': 'memory_attention_feed_forward_hidden_size=2048', 'memory_attention_feed_forward_hidden_act': "memory_attention_feed_forward_hidden_act='relu'", 'memory_attention_dropout': 'memory_attention_dropout=0.1', 'memory_attention_rope_theta': 'memory_attention_rope_theta=10000', 'memory_attention_rope_feat_sizes': 'memory_attention_rope_feat_sizes=None', 'memory_attention_rope_dropout': 'memory_attention_rope_dropout=0.1', 'memory_encoder_hidden_size': 'memory_encoder_hidden_size=256', 'memory_encoder_output_channels': 'memory_encoder_output_channels=64', 
'mask_downsampler_embed_dim': 'mask_downsampler_embed_dim=256', 'mask_downsampler_kernel_size': 'mask_downsampler_kernel_size=3', 'mask_downsampler_stride': 'mask_downsampler_stride=2', 'mask_downsampler_padding': 'mask_downsampler_padding=1', 'mask_downsampler_total_stride': 'mask_downsampler_total_stride=16', 'mask_downsampler_hidden_act': "mask_downsampler_hidden_act='gelu'", 'memory_fuser_num_layers': 'memory_fuser_num_layers=2', 'memory_fuser_embed_dim': 'memory_fuser_embed_dim=256', 'memory_fuser_intermediate_dim': 'memory_fuser_intermediate_dim=1024', 'memory_fuser_kernel_size': 'memory_fuser_kernel_size=7', 'memory_fuser_padding': 'memory_fuser_padding=3', 'memory_fuser_layer_scale_init_value': 'memory_fuser_layer_scale_init_value=1e-06', 'memory_fuser_hidden_act': "memory_fuser_hidden_act='gelu'"
+}, model_name='Sam3TrackerVideoModel', library='transformers', import_path='transformers.models.sam3_tracker_video'), ModelAttributes(model=, model_type='model', model_parameters={'detector_config': 'detector_config=None', 'tracker_config': 'tracker_config=None', 'initializer_range': 'initializer_range=0.02', 'low_res_mask_size': 'low_res_mask_size=288', 'score_threshold_detection': 'score_threshold_detection=0.5', 'det_nms_thresh': 'det_nms_thresh=0.1', 'assoc_iou_thresh': 'assoc_iou_thresh=0.1', 'trk_assoc_iou_thresh': 'trk_assoc_iou_thresh=0.5', 'new_det_thresh': 'new_det_thresh=0.7', 'recondition_on_trk_masks': 'recondition_on_trk_masks=True', 'hotstart_delay': 'hotstart_delay=15', 'hotstart_unmatch_thresh': 'hotstart_unmatch_thresh=8', 'hotstart_dup_thresh': 'hotstart_dup_thresh=8', 'suppress_unmatched_only_within_hotstart': 'suppress_unmatched_only_within_hotstart=True', 'init_trk_keep_alive': 'init_trk_keep_alive=30', 'max_trk_keep_alive': 'max_trk_keep_alive=30', 'min_trk_keep_alive': 'min_trk_keep_alive=-1', 'suppress_overlapping_based_on_recent_occlusion_threshold': 'suppress_overlapping_based_on_recent_occlusion_threshold=0.7', 'decrease_trk_keep_alive_for_empty_masklets': 'decrease_trk_keep_alive_for_empty_masklets=False', 'fill_hole_area': 'fill_hole_area=16', 'max_num_objects': 'max_num_objects=10000', 'recondition_every_nth_frame': 'recondition_every_nth_frame=16', 'high_conf_thresh': 'high_conf_thresh=0.8', 'high_iou_thresh': 'high_iou_thresh=0.8'
+}, model_name='Sam3VideoModel', library='transformers', import_path='transformers.models.sam3_video'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'fpn_hidden_size': 'fpn_hidden_size=256', 'backbone_feature_sizes': 'backbone_feature_sizes=None', 'scale_factors': 'scale_factors=None', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam3VisionModel', library='transformers', import_path='transformers.models.sam3'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1024', 'intermediate_size': 'intermediate_size=4736', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=16', 'num_channels': 'num_channels=3', 'image_size': 'image_size=1008', 'patch_size': 'patch_size=14', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'rope_theta': 'rope_theta=10000.0', 'window_size': 'window_size=24', 'global_attn_indexes': 'global_attn_indexes=None', 'layer_scale_init_value': 'layer_scale_init_value=None', 'pretrain_image_size': 'pretrain_image_size=336', 'hidden_dropout': 'hidden_dropout=0.0', 'initializer_range': 'initializer_range=0.02'
+}, model_name='Sam3ViTModel', library='transformers', import_path='transformers.models.sam3'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'prompt_encoder_config': 'prompt_encoder_config=None', 'mask_decoder_config': 'mask_decoder_config=None', 'initializer_range': 'initializer_range=0.02'
+}, model_name='SamHQModel', library='transformers', import_path='transformers.models.sam_hq'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'output_channels': 'output_channels=256', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'image_size': 'image_size=1024', 'patch_size': 'patch_size=16', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=1e-10', 'qkv_bias': 'qkv_bias=True', 'mlp_ratio': 'mlp_ratio=4.0', 'use_abs_pos': 'use_abs_pos=True', 'use_rel_pos': 'use_rel_pos=True', 'window_size': 'window_size=14', 'global_attn_indexes': 'global_attn_indexes=[
+ 2,
+ 5,
+ 8,
+ 11
+ ]', 'num_pos_feats': 'num_pos_feats=128', 'mlp_dim': 'mlp_dim=None'
+}, model_name='SamHQVisionModel', library='transformers', import_path='transformers.models.sam_hq'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'output_channels': 'output_channels=256', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'image_size': 'image_size=1024', 'patch_size': 'patch_size=16', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=1e-10', 'qkv_bias': 'qkv_bias=True', 'mlp_ratio': 'mlp_ratio=4.0', 'use_abs_pos': 'use_abs_pos=True', 'use_rel_pos': 'use_rel_pos=True', 'window_size': 'window_size=14', 'global_attn_indexes': 'global_attn_indexes=[
+ 2,
+ 5,
+ 8,
+ 11
+ ]', 'num_pos_feats': 'num_pos_feats=128', 'mlp_dim': 'mlp_dim=None'
+}, model_name='SamVisionModel', library='transformers', import_path='transformers.models.sam'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=256102', 't2u_vocab_size': 't2u_vocab_size=10082', 'hidden_size': 'hidden_size=1024', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'max_position_embeddings': 'max_position_embeddings=1024', 'is_encoder_decoder': 'is_encoder_decoder=True', 'encoder_layerdrop': 'encoder_layerdrop=0.05', 'decoder_layerdrop': 'decoder_layerdrop=0.05', 'activation_function': "activation_function='relu'", 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'scale_embedding': 'scale_embedding=True', 'encoder_layers': 'encoder_layers=24', 'encoder_ffn_dim': 'encoder_ffn_dim=8192', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=24', 'decoder_ffn_dim': 'decoder_ffn_dim=8192', 'decoder_attention_heads': 'decoder_attention_heads=16', 'decoder_start_token_id': 'decoder_start_token_id=3', 'max_new_tokens': 'max_new_tokens=256', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=2', 'eos_token_id': 'eos_token_id=3', 'speech_encoder_layers': 'speech_encoder_layers=24', 'speech_encoder_attention_heads': 'speech_encoder_attention_heads=16', 'speech_encoder_intermediate_size': 'speech_encoder_intermediate_size=4096', 'speech_encoder_hidden_act': "speech_encoder_hidden_act='swish'", 'speech_encoder_dropout': 'speech_encoder_dropout=0.0', 'add_adapter': 'add_adapter=True', 'speech_encoder_layerdrop': 'speech_encoder_layerdrop=0.1', 'feature_projection_input_dim': 'feature_projection_input_dim=160', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'adaptor_kernel_size': 'adaptor_kernel_size=8', 'adaptor_stride': 'adaptor_stride=8', 'adaptor_dropout': 'adaptor_dropout=0.1', 
'num_adapter_layers': 'num_adapter_layers=1', 'position_embeddings_type': "position_embeddings_type='relative'", 'rotary_embedding_base': 'rotary_embedding_base=10000', 'max_source_positions': 'max_source_positions=4096', 'conv_depthwise_kernel_size': 'conv_depthwise_kernel_size=31', 't2u_bos_token_id': 't2u_bos_token_id=0', 't2u_pad_token_id': 't2u_pad_token_id=1', 't2u_eos_token_id': 't2u_eos_token_id=2', 't2u_decoder_start_token_id': 't2u_decoder_start_token_id=2', 't2u_max_new_tokens': 't2u_max_new_tokens=1024', 't2u_encoder_layers': 't2u_encoder_layers=6', 't2u_encoder_ffn_dim': 't2u_encoder_ffn_dim=8192', 't2u_encoder_attention_heads': 't2u_encoder_attention_heads=16', 't2u_decoder_layers': 't2u_decoder_layers=6', 't2u_decoder_ffn_dim': 't2u_decoder_ffn_dim=8192', 't2u_decoder_attention_heads': 't2u_decoder_attention_heads=16', 't2u_max_position_embeddings': 't2u_max_position_embeddings=2048', 'sampling_rate': 'sampling_rate=16000', 'upsample_initial_channel': 'upsample_initial_channel=512', 'upsample_rates': 'upsample_rates=[
+ 5,
+ 4,
+ 4,
+ 2,
+ 2
+ ]', 'upsample_kernel_sizes': 'upsample_kernel_sizes=[
+ 11,
+ 8,
+ 8,
+ 4,
+ 4
+ ]', 'resblock_kernel_sizes': 'resblock_kernel_sizes=[
+ 3,
+ 7,
+ 11
+ ]', 'resblock_dilation_sizes': 'resblock_dilation_sizes=[
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ]', 'leaky_relu_slope': 'leaky_relu_slope=0.1', 'unit_hifi_gan_vocab_size': 'unit_hifi_gan_vocab_size=10000', 'unit_embed_dim': 'unit_embed_dim=1280', 'lang_embed_dim': 'lang_embed_dim=256', 'spkr_embed_dim': 'spkr_embed_dim=256', 'vocoder_num_langs': 'vocoder_num_langs=36', 'vocoder_num_spkrs': 'vocoder_num_spkrs=200', 'variance_predictor_kernel_size': 'variance_predictor_kernel_size=3', 'var_pred_dropout': 'var_pred_dropout=0.5', 'vocoder_offset': 'vocoder_offset=4'
+}, model_name='SeamlessM4TModel', library='transformers', import_path='transformers.models.seamless_m4t'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'src_lang': "src_lang='eng'", 'tgt_lang': "tgt_lang='fra'", 'additional_special_tokens': 'additional_special_tokens=None', 'keep_accents': 'keep_accents=None', 'vocab_file': 'vocab_file=None'
+}, model_name='SeamlessM4TTokenizer', library='transformers', import_path='transformers.models.seamless_m4t'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=256102', 't2u_vocab_size': 't2u_vocab_size=10082', 'char_vocab_size': 'char_vocab_size=10943', 'hidden_size': 'hidden_size=1024', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'max_position_embeddings': 'max_position_embeddings=4096', 'is_encoder_decoder': 'is_encoder_decoder=True', 'encoder_layerdrop': 'encoder_layerdrop=0.05', 'decoder_layerdrop': 'decoder_layerdrop=0.05', 'activation_function': "activation_function='relu'", 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'scale_embedding': 'scale_embedding=True', 'encoder_layers': 'encoder_layers=24', 'encoder_ffn_dim': 'encoder_ffn_dim=8192', 'encoder_attention_heads': 'encoder_attention_heads=16', 'decoder_layers': 'decoder_layers=24', 'decoder_ffn_dim': 'decoder_ffn_dim=8192', 'decoder_attention_heads': 'decoder_attention_heads=16', 'decoder_start_token_id': 'decoder_start_token_id=3', 'max_new_tokens': 'max_new_tokens=256', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=2', 'eos_token_id': 'eos_token_id=3', 'speech_encoder_layers': 'speech_encoder_layers=24', 'speech_encoder_attention_heads': 'speech_encoder_attention_heads=16', 'speech_encoder_intermediate_size': 'speech_encoder_intermediate_size=4096', 'speech_encoder_hidden_act': "speech_encoder_hidden_act='swish'", 'speech_encoder_dropout': 'speech_encoder_dropout=0.0', 'add_adapter': 'add_adapter=True', 'speech_encoder_layerdrop': 'speech_encoder_layerdrop=0.1', 'feature_projection_input_dim': 'feature_projection_input_dim=160', 'adaptor_kernel_size': 'adaptor_kernel_size=8', 'adaptor_stride': 'adaptor_stride=8', 'adaptor_dropout': 'adaptor_dropout=0.1', 'num_adapter_layers': 'num_adapter_layers=1', 'position_embeddings_type': 
"position_embeddings_type='relative_key'", 'conv_depthwise_kernel_size': 'conv_depthwise_kernel_size=31', 'left_max_position_embeddings': 'left_max_position_embeddings=64', 'right_max_position_embeddings': 'right_max_position_embeddings=8', 'speech_encoder_chunk_size': 'speech_encoder_chunk_size=20000', 'speech_encoder_left_chunk_num': 'speech_encoder_left_chunk_num=128', 't2u_bos_token_id': 't2u_bos_token_id=0', 't2u_pad_token_id': 't2u_pad_token_id=1', 't2u_eos_token_id': 't2u_eos_token_id=2', 't2u_encoder_layers': 't2u_encoder_layers=6', 't2u_encoder_ffn_dim': 't2u_encoder_ffn_dim=8192', 't2u_encoder_attention_heads': 't2u_encoder_attention_heads=16', 't2u_decoder_layers': 't2u_decoder_layers=6', 't2u_decoder_ffn_dim': 't2u_decoder_ffn_dim=8192', 't2u_decoder_attention_heads': 't2u_decoder_attention_heads=16', 't2u_max_position_embeddings': 't2u_max_position_embeddings=4096', 't2u_variance_predictor_embed_dim': 't2u_variance_predictor_embed_dim=1024', 't2u_variance_predictor_hidden_dim': 't2u_variance_predictor_hidden_dim=256', 't2u_variance_predictor_kernel_size': 't2u_variance_predictor_kernel_size=3', 't2u_variance_pred_dropout': 't2u_variance_pred_dropout=0.5', 'sampling_rate': 'sampling_rate=16000', 'upsample_initial_channel': 'upsample_initial_channel=512', 'upsample_rates': 'upsample_rates=[
+ 5,
+ 4,
+ 4,
+ 2,
+ 2
+ ]', 'upsample_kernel_sizes': 'upsample_kernel_sizes=[
+ 11,
+ 8,
+ 8,
+ 4,
+ 4
+ ]', 'resblock_kernel_sizes': 'resblock_kernel_sizes=[
+ 3,
+ 7,
+ 11
+ ]', 'resblock_dilation_sizes': 'resblock_dilation_sizes=[
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ]', 'leaky_relu_slope': 'leaky_relu_slope=0.1', 'unit_hifi_gan_vocab_size': 'unit_hifi_gan_vocab_size=10000', 'unit_embed_dim': 'unit_embed_dim=1280', 'lang_embed_dim': 'lang_embed_dim=256', 'spkr_embed_dim': 'spkr_embed_dim=256', 'vocoder_num_langs': 'vocoder_num_langs=36', 'vocoder_num_spkrs': 'vocoder_num_spkrs=200', 'variance_predictor_kernel_size': 'variance_predictor_kernel_size=3', 'var_pred_dropout': 'var_pred_dropout=0.5', 'vocoder_offset': 'vocoder_offset=4'
+}, model_name='SeamlessM4Tv2Model', library='transformers', import_path='transformers.models.seamless_m4t_v2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'cls_token': "cls_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'src_lang': "src_lang='eng'", 'tgt_lang': "tgt_lang='fra'", 'additional_special_tokens': 'additional_special_tokens=None', 'keep_accents': 'keep_accents=None', 'vocab_file': 'vocab_file=None'
+}, model_name='SeamlessM4TTokenizer', library='transformers', import_path='transformers.models.seamless_m4t'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 155136', 'hidden_size': 'hidden_size: Optional[int
+ ] = 4096', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 27648', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 64', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 80', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 8', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 524288', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[float
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 2', 'pretraining_tp': 'pretraining_tp: Optional[int
+ ] = 1', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = True', 'attention_out_bias': 'attention_out_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.1', 'residual_dropout': 'residual_dropout: Optional[float
+ ] = 0.1', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False', 'head_dim': 'head_dim: Optional[int
+ ] = 128'
+}, model_name='SeedOssModel', library='transformers', import_path='transformers.models.seed_oss'), ModelAttributes(model=, model_type='model', model_parameters={'num_channels': 'num_channels=3', 'num_encoder_blocks': 'num_encoder_blocks=4', 'depths': 'depths=[
+ 2,
+ 2,
+ 2,
+ 2
+ ]', 'sr_ratios': 'sr_ratios=[
+ 8,
+ 4,
+ 2,
+ 1
+ ]', 'hidden_sizes': 'hidden_sizes=[
+ 32,
+ 64,
+ 160,
+ 256
+ ]', 'patch_sizes': 'patch_sizes=[
+ 7,
+ 3,
+ 3,
+ 3
+ ]', 'strides': 'strides=[
+ 4,
+ 2,
+ 2,
+ 2
+ ]', 'num_attention_heads': 'num_attention_heads=[
+ 1,
+ 2,
+ 5,
+ 8
+ ]', 'mlp_ratios': 'mlp_ratios=[
+ 4,
+ 4,
+ 4,
+ 4
+ ]', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'classifier_dropout_prob': 'classifier_dropout_prob=0.1', 'initializer_range': 'initializer_range=0.02', 'drop_path_rate': 'drop_path_rate=0.1', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'decoder_hidden_size': 'decoder_hidden_size=256', 'semantic_loss_ignore_index': 'semantic_loss_ignore_index=255'
+}, model_name='SegformerModel', library='transformers', import_path='transformers.models.segformer'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1024', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=16', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=[
+ 896,
+ 448
+ ]', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'mlp_dim': 'mlp_dim=None', 'drop_path_rate': 'drop_path_rate=0.1', 'pretrain_image_size': 'pretrain_image_size=224', 'decoder_hidden_size': 'decoder_hidden_size=64', 'use_relative_position_embeddings': 'use_relative_position_embeddings=True', 'merge_index': 'merge_index=2', 'intermediate_hidden_state_indices': 'intermediate_hidden_state_indices=[
+ 5,
+ 11,
+ 17,
+ 23
+ ]', 'beta': 'beta=0.01'
+}, model_name='SegGptModel', library='transformers', import_path='transformers.models.seggpt'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'squeeze_factor': 'squeeze_factor=2', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(64,
+ 128,
+ 128,
+ 128,
+ 128,
+ 256,
+ 256,
+ 256,
+ 256,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 1,
+ 3,
+ 1,
+ 3,
+ 1,
+ 3,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2'
+}, model_name='SEWModel', library='transformers', import_path='transformers.models.sew'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'squeeze_factor': 'squeeze_factor=2', 'max_position_embeddings': 'max_position_embeddings=512', 'position_buckets': 'position_buckets=256', 'share_att_key': 'share_att_key=True', 'relative_attention': 'relative_attention=True', 'pos_att_type': "pos_att_type=('p2c', 'c2p')", 'norm_rel_ebd': "norm_rel_ebd='layer_norm'", 'hidden_act': "hidden_act='gelu_python'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-07', 'feature_layer_norm_eps': 'feature_layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(64,
+ 128,
+ 128,
+ 128,
+ 128,
+ 256,
+ 256,
+ 256,
+ 256,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 1,
+ 3,
+ 1,
+ 3,
+ 1,
+ 3,
+ 1,
+ 2,
+ 1,
+ 2,
+ 1)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2'
+}, model_name='SEWDModel', library='transformers', import_path='transformers.models.sew_d'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None'
+}, model_name='SiglipModel', library='transformers', import_path='transformers.models.siglip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'additional_special_tokens': 'additional_special_tokens=None', 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None', 'model_max_length': 'model_max_length=64', 'do_lower_case': 'do_lower_case=True'
+}, model_name='SiglipTokenizer', library='transformers', import_path='transformers.models.siglip'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None'
+}, model_name='Siglip2Model', library='transformers', import_path='transformers.models.siglip2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'num_patches': 'num_patches=256', 'patch_size': 'patch_size=16', 'hidden_act': "hidden_act='gelu_pytorch_tanh'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0'
+}, model_name='Siglip2VisionModel', library='transformers', import_path='transformers.models.siglip2'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'hidden_act': "hidden_act='gelu_pytorch_tanh'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0'
+}, model_name='SiglipVisionModel', library='transformers', import_path='transformers.models.siglip'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 128256', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2048', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 11008', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 36', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 16', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 32768', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 128004', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 128000', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 128001', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_sliding_window': 'use_sliding_window: Optional[bool
+ ] = False', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'no_rope_layers': 'no_rope_layers: Optional[int
+ ] = None', 'no_rope_layer_interval': 'no_rope_layer_interval: Optional[int
+ ] = 4', 'layer_types': 'layer_types: Optional[int
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'mlp_bias': 'mlp_bias: Optional[bool
+ ] = False'
+}, model_name='SmolLM3Model', library='transformers', import_path='transformers.models.smollm3'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'args': '*args'
+}, model_name='TokenizersBackend', library='transformers', import_path='transformers'), ModelAttributes(model=, model_type='model', model_parameters={'image_token_id': 'image_token_id=128257', 'tie_word_embeddings': 'tie_word_embeddings=False', 'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'scale_factor': 'scale_factor=2', 'pad_token_id': 'pad_token_id=128002'
+}, model_name='SmolVLMModel', library='transformers', import_path='transformers.models.smolvlm'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=1152', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=16', 'num_channels': 'num_channels=3', 'image_size': 'image_size=224', 'patch_size': 'patch_size=32', 'hidden_act': "hidden_act='gelu_pytorch_tanh'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02'
+}, model_name='SmolVLMVisionTransformer', library='transformers', import_path='transformers.models.smolvlm'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=10000', 'encoder_layers': 'encoder_layers=12', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=4', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=4', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'decoder_start_token_id': 'decoder_start_token_id=2', 'scale_embedding': 'scale_embedding=True', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'max_source_positions': 'max_source_positions=6000', 'max_target_positions': 'max_target_positions=1024', 'num_conv_layers': 'num_conv_layers=2', 'conv_kernel_sizes': 'conv_kernel_sizes=(5,
+ 5)', 'conv_channels': 'conv_channels=1024', 'input_feat_per_channel': 'input_feat_per_channel=80', 'input_channels': 'input_channels=1'
+}, model_name='Speech2TextModel', library='transformers', import_path='transformers.models.speech_to_text'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'spm_file': 'spm_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'pad_token': "pad_token=''", 'unk_token': "unk_token=''", 'do_upper_case': 'do_upper_case=False', 'do_lower_case': 'do_lower_case=False', 'tgt_lang': 'tgt_lang=None', 'lang_codes': 'lang_codes=None', 'additional_special_tokens': 'additional_special_tokens=None', 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None'
+}, model_name='Speech2TextTokenizer', library='transformers', import_path='transformers.models.speech_to_text'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=81', 'hidden_size': 'hidden_size=768', 'encoder_layers': 'encoder_layers=12', 'encoder_attention_heads': 'encoder_attention_heads=12', 'encoder_ffn_dim': 'encoder_ffn_dim=3072', 'encoder_layerdrop': 'encoder_layerdrop=0.1', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=3072', 'decoder_attention_heads': 'decoder_attention_heads=12', 'decoder_layerdrop': 'decoder_layerdrop=0.1', 'hidden_act': "hidden_act='gelu'", 'positional_dropout': 'positional_dropout=0.1', 'hidden_dropout': 'hidden_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'scale_embedding': 'scale_embedding=False', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'decoder_start_token_id': 'decoder_start_token_id=2', 'num_mel_bins': 'num_mel_bins=80', 'speech_decoder_prenet_layers': 'speech_decoder_prenet_layers=2', 'speech_decoder_prenet_units': 'speech_decoder_prenet_units=256', 'speech_decoder_prenet_dropout': 'speech_decoder_prenet_dropout=0.5', 'speaker_embedding_dim': 'speaker_embedding_dim=512', 'speech_decoder_postnet_layers': 'speech_decoder_postnet_layers=5', 'speech_decoder_postnet_units': 'speech_decoder_postnet_units=256', 'speech_decoder_postnet_kernel': 'speech_decoder_postnet_kernel=5', 'speech_decoder_postnet_dropout': 'speech_decoder_postnet_dropout=0.5', 'reduction_factor': 'reduction_factor=2', 'max_speech_positions': 'max_speech_positions=4000', 'max_text_positions': 'max_text_positions=450', 'encoder_max_relative_position': 'encoder_max_relative_position=160', 'use_guided_attention_loss': 'use_guided_attention_loss=True', 'guided_attention_loss_num_heads': 'guided_attention_loss_num_heads=2', 'guided_attention_loss_sigma': 'guided_attention_loss_sigma=0.4', 'guided_attention_loss_scale': 'guided_attention_loss_scale=10.0', 'is_encoder_decoder': 'is_encoder_decoder=True'
+}, model_name='SpeechT5Model', library='transformers', import_path='transformers.models.speecht5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'normalize': 'normalize=False', 'sp_model_kwargs': 'sp_model_kwargs: Optional[dict[str, Any
+ ]
+ ] = None'
+}, model_name='SpeechT5Tokenizer', library='transformers', import_path='transformers.models.speecht5'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'question_token_id': 'question_token_id=104'
+}, model_name='SplinterModel', library='transformers', import_path='transformers.models.splinter'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = True', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'question_token': "question_token: str = '[QUESTION]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='SplinterTokenizer', library='transformers', import_path='transformers.models.splinter'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'embedding_size': 'embedding_size=768', 'q_groups': 'q_groups=4', 'k_groups': 'k_groups=4', 'v_groups': 'v_groups=4', 'post_attention_groups': 'post_attention_groups=1', 'intermediate_groups': 'intermediate_groups=4', 'output_groups': 'output_groups=4'
+}, model_name='SqueezeBertModel', library='transformers', import_path='transformers.models.squeezebert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 50304', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 6912', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2560', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 32', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 32', 'hidden_act': "hidden_act: Optional[str] = 'silu'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'layer_norm_eps': 'layer_norm_eps: Optional[float
+ ] = 1e-05', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = False', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'use_qkv_bias': 'use_qkv_bias: Optional[bool
+ ] = False', 'qk_layernorm': 'qk_layernorm: Optional[bool
+ ] = False', 'use_parallel_residual': 'use_parallel_residual: Optional[bool
+ ] = False', 'hidden_dropout': 'hidden_dropout: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 0'
+}, model_name='StableLmModel', library='transformers', import_path='transformers.models.stablelm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 49152', 'hidden_size': 'hidden_size: Optional[int
+ ] = 3072', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 12288', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 30', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 24', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 2', 'hidden_act': "hidden_act: Optional[str] = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 4096', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.018042', 'norm_epsilon': 'norm_epsilon: Optional[int
+ ] = 1e-05', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 50256', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 50256', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'sliding_window': 'sliding_window: Optional[int
+ ] = None', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'residual_dropout': 'residual_dropout: Optional[float
+ ] = 0.0', 'embedding_dropout': 'embedding_dropout: Optional[float
+ ] = 0.0', 'use_bias': 'use_bias: Optional[bool
+ ] = True'
+}, model_name='Starcoder2Model', library='transformers', import_path='transformers.models.starcoder2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'bos_token': "bos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'eos_token': "eos_token: Union[tokenizers.AddedToken, str] = '<|endoftext|>'", 'pad_token': 'pad_token: Union[tokenizers.AddedToken, str, NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space=False'
+}, model_name='GPT2Tokenizer', library='transformers', import_path='transformers.models.gpt2'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'num_channels': 'num_channels=3', 'depths': 'depths=[
+ 3,
+ 3,
+ 6,
+ 4
+ ]', 'embed_dims': 'embed_dims=[
+ 48,
+ 56,
+ 112,
+ 220
+ ]', 'mlp_ratio': 'mlp_ratio=4', 'downsamples': 'downsamples=[True, True, True, True
+ ]', 'hidden_act': "hidden_act='gelu'", 'down_patch_size': 'down_patch_size=3', 'down_stride': 'down_stride=2', 'down_pad': 'down_pad=1', 'drop_path_rate': 'drop_path_rate=0.0', 'drop_mlp_rate': 'drop_mlp_rate=0.0', 'drop_conv_encoder_rate': 'drop_conv_encoder_rate=0.0', 'use_layer_scale': 'use_layer_scale=True', 'layer_scale_init_value': 'layer_scale_init_value=1e-05', 'batch_norm_eps': 'batch_norm_eps=1e-05'
+}, model_name='SwiftFormerModel', library='transformers', import_path='transformers.models.swiftformer'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=96', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'num_heads': 'num_heads=[
+ 3,
+ 6,
+ 12,
+ 24
+ ]', 'window_size': 'window_size=7', 'mlp_ratio': 'mlp_ratio=4.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'use_absolute_embeddings': 'use_absolute_embeddings=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'encoder_stride': 'encoder_stride=32', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='SwinModel', library='transformers', import_path='transformers.models.swin'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=64', 'patch_size': 'patch_size=1', 'num_channels': 'num_channels=3', 'num_channels_out': 'num_channels_out=None', 'embed_dim': 'embed_dim=180', 'depths': 'depths=[
+ 6,
+ 6,
+ 6,
+ 6,
+ 6,
+ 6
+ ]', 'num_heads': 'num_heads=[
+ 6,
+ 6,
+ 6,
+ 6,
+ 6,
+ 6
+ ]', 'window_size': 'window_size=8', 'mlp_ratio': 'mlp_ratio=2.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'use_absolute_embeddings': 'use_absolute_embeddings=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'upscale': 'upscale=2', 'img_range': 'img_range=1.0', 'resi_connection': "resi_connection='1conv'", 'upsampler': "upsampler='pixelshuffle'"
+}, model_name='Swin2SRModel', library='transformers', import_path='transformers.models.swin2sr'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=4', 'num_channels': 'num_channels=3', 'embed_dim': 'embed_dim=96', 'depths': 'depths=[
+ 2,
+ 2,
+ 6,
+ 2
+ ]', 'num_heads': 'num_heads=[
+ 3,
+ 6,
+ 12,
+ 24
+ ]', 'window_size': 'window_size=7', 'pretrained_window_sizes': 'pretrained_window_sizes=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'mlp_ratio': 'mlp_ratio=4.0', 'qkv_bias': 'qkv_bias=True', 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'drop_path_rate': 'drop_path_rate=0.1', 'hidden_act': "hidden_act='gelu'", 'use_absolute_embeddings': 'use_absolute_embeddings=False', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'encoder_stride': 'encoder_stride=32', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='Swinv2Model', library='transformers', import_path='transformers.models.swinv2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32128', 'd_model': 'd_model=768', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=2048', 'expert_capacity': 'expert_capacity=64', 'num_layers': 'num_layers=12', 'num_sparse_encoder_layers': 'num_sparse_encoder_layers=3', 'num_decoder_layers': 'num_decoder_layers=12', 'num_sparse_decoder_layers': 'num_sparse_decoder_layers=3', 'num_heads': 'num_heads=12', 'num_experts': 'num_experts=8', 'router_bias': 'router_bias=False', 'router_jitter_noise': 'router_jitter_noise=0.01', 'router_dtype': "router_dtype='float32'", 'router_ignore_padding_tokens': 'router_ignore_padding_tokens=False', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'router_z_loss_coef': 'router_z_loss_coef=0.001', 'router_aux_loss_coef': 'router_aux_loss_coef=0.001', 'initializer_factor': 'initializer_factor=1.0', 'dense_act_fn': "dense_act_fn='relu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'add_router_probs': 'add_router_probs=False', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1'
+}, model_name='SwitchTransformersModel', library='transformers', import_path='transformers.models.switch_transformers'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32128', 'd_model': 'd_model=512', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=2048', 'num_layers': 'num_layers=6', 'num_decoder_layers': 'num_decoder_layers=None', 'num_heads': 'num_heads=8', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_factor': 'initializer_factor=1.0', 'feed_forward_proj': "feed_forward_proj='relu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'classifier_dropout': 'classifier_dropout=0.0'
+}, model_name='T5Model', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'encoder': 'encoder: Union[transformers.models.t5gemma.configuration_t5gemma.T5GemmaModuleConfig, dict[Any, Any
+ ], NoneType
+ ] = None', 'decoder': 'decoder: Union[transformers.models.t5gemma.configuration_t5gemma.T5GemmaModuleConfig, dict[Any, Any
+ ], NoneType
+ ] = None', 'is_encoder_decoder': 'is_encoder_decoder: Optional[bool
+ ] = True', 'dropout_rate': 'dropout_rate: Optional[float
+ ] = 0.0', 'classifier_dropout_rate': 'classifier_dropout_rate: Optional[float
+ ] = 0.0', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'vocab_size': 'vocab_size: Optional[int
+ ] = 256000'
+}, model_name='T5GemmaModel', library='transformers', import_path='transformers.models.t5gemma'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = ''", 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='GemmaTokenizer', library='transformers', import_path='transformers.models.gemma'), ModelAttributes(model=, model_type='model', model_parameters={'encoder': 'encoder: Union[transformers.models.t5gemma2.configuration_t5gemma2.T5Gemma2EncoderConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'decoder': 'decoder: Union[transformers.models.t5gemma2.configuration_t5gemma2.T5Gemma2DecoderConfig, dict[str, Any
+ ], NoneType
+ ] = None', 'is_encoder_decoder': 'is_encoder_decoder: bool = True', 'dropout_rate': 'dropout_rate: float = 0.0', 'attention_dropout': 'attention_dropout: float = 0.0', 'classifier_dropout_rate': 'classifier_dropout_rate: float = 0.0', 'initializer_range': 'initializer_range: float = 0.02', 'image_token_index': 'image_token_index: int = 256001'
+}, model_name='T5Gemma2Model', library='transformers', import_path='transformers.models.t5gemma2'), ModelAttributes(model=, model_type='model', model_parameters={'use_timm_backbone': 'use_timm_backbone=True', 'backbone_config': 'backbone_config=None', 'num_channels': 'num_channels=3', 'num_queries': 'num_queries=100', 'encoder_layers': 'encoder_layers=6', 'encoder_ffn_dim': 'encoder_ffn_dim=2048', 'encoder_attention_heads': 'encoder_attention_heads=8', 'decoder_layers': 'decoder_layers=6', 'decoder_ffn_dim': 'decoder_ffn_dim=2048', 'decoder_attention_heads': 'decoder_attention_heads=8', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='relu'", 'd_model': 'd_model=256', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'init_xavier_std': 'init_xavier_std=1.0', 'auxiliary_loss': 'auxiliary_loss=False', 'position_embedding_type': "position_embedding_type='sine'", 'backbone': "backbone='resnet50'", 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'backbone_kwargs': 'backbone_kwargs=None', 'dilation': 'dilation=False', 'class_cost': 'class_cost=1', 'bbox_cost': 'bbox_cost=5', 'giou_cost': 'giou_cost=2', 'mask_loss_coefficient': 'mask_loss_coefficient=1', 'dice_loss_coefficient': 'dice_loss_coefficient=1', 'bbox_loss_coefficient': 'bbox_loss_coefficient=5', 'giou_loss_coefficient': 'giou_loss_coefficient=2', 'eos_coefficient': 'eos_coefficient=0.1'
+}, model_name='TableTransformerModel', library='transformers', import_path='transformers.models.table_transformer'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=1024', 'type_vocab_sizes': 'type_vocab_sizes=[
+ 3,
+ 256,
+ 256,
+ 2,
+ 256,
+ 256,
+ 10
+ ]', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=0', 'positive_label_weight': 'positive_label_weight=10.0', 'num_aggregation_labels': 'num_aggregation_labels=0', 'aggregation_loss_weight': 'aggregation_loss_weight=1.0', 'use_answer_as_supervision': 'use_answer_as_supervision=None', 'answer_loss_importance': 'answer_loss_importance=1.0', 'use_normalized_answer_loss': 'use_normalized_answer_loss=False', 'huber_loss_delta': 'huber_loss_delta=None', 'temperature': 'temperature=1.0', 'aggregation_temperature': 'aggregation_temperature=1.0', 'use_gumbel_for_cells': 'use_gumbel_for_cells=False', 'use_gumbel_for_aggregation': 'use_gumbel_for_aggregation=False', 'average_approximation_function': "average_approximation_function='ratio'", 'cell_selection_preference': 'cell_selection_preference=None', 'answer_loss_cutoff': 'answer_loss_cutoff=None', 'max_num_rows': 'max_num_rows=64', 'max_num_columns': 'max_num_columns=32', 'average_logits_per_cell': 'average_logits_per_cell=False', 'select_one_column': 'select_one_column=True', 'allow_empty_column_selection': 'allow_empty_column_selection=False', 'init_cell_selection_weights_to_zero': 'init_cell_selection_weights_to_zero=False', 'reset_position_index_per_cell': 'reset_position_index_per_cell=True', 'disable_per_token_loss': 'disable_per_token_loss=False', 'aggregation_labels': 'aggregation_labels=None', 'no_aggregation_label_index': 'no_aggregation_label_index=None'
+}, model_name='TapasModel', library='transformers', import_path='transformers.models.tapas'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'do_lower_case': 'do_lower_case=True', 'do_basic_tokenize': 'do_basic_tokenize=True', 'never_split': 'never_split=None', 'unk_token': "unk_token='[UNK]'", 'sep_token': "sep_token='[SEP]'", 'pad_token': "pad_token='[PAD]'", 'cls_token': "cls_token='[CLS]'", 'mask_token': "mask_token='[MASK]'", 'empty_token': "empty_token='[EMPTY]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars=True', 'strip_accents': 'strip_accents=None', 'cell_trim_length': 'cell_trim_length: int = -1', 'max_column_id': 'max_column_id: Optional[int
+ ] = None', 'max_row_id': 'max_row_id: Optional[int
+ ] = None', 'strip_column_names': 'strip_column_names: bool = False', 'update_answer_coordinates': 'update_answer_coordinates: bool = False', 'min_question_length': 'min_question_length=None', 'max_question_length': 'max_question_length=None', 'model_max_length': 'model_max_length: int = 512', 'additional_special_tokens': 'additional_special_tokens: Optional[list[str
+ ]
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=True'
+}, model_name='TapasTokenizer', library='transformers', import_path='transformers.models.tapas'), ModelAttributes(model=, model_type='model', model_parameters={'stem_kernel_size': 'stem_kernel_size=3', 'stem_stride': 'stem_stride=2', 'stem_num_channels': 'stem_num_channels=3', 'stem_out_channels': 'stem_out_channels=64', 'stem_act_func': "stem_act_func='relu'", 'image_size': 'image_size=[
+ 640,
+ 640
+ ]', 'conv_layer_kernel_sizes': 'conv_layer_kernel_sizes=None', 'conv_layer_strides': 'conv_layer_strides=None', 'hidden_sizes': 'hidden_sizes=[
+ 64,
+ 64,
+ 128,
+ 256,
+ 512
+ ]', 'batch_norm_eps': 'batch_norm_eps=1e-05', 'initializer_range': 'initializer_range=0.02', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='TextNetModel', library='transformers', import_path='transformers.models.textnet'), ModelAttributes(model=, model_type='model', model_parameters={'prediction_length': 'prediction_length: Optional[int
+ ] = None', 'context_length': 'context_length: Optional[int
+ ] = None', 'distribution_output': "distribution_output: str = 'student_t'", 'loss': "loss: str = 'nll'", 'input_size': 'input_size: int = 1', 'lags_sequence': 'lags_sequence: list[int
+ ] = [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7
+ ]', 'scaling': "scaling: Union[str, bool, NoneType] = 'mean'", 'num_dynamic_real_features': 'num_dynamic_real_features: int = 0', 'num_static_categorical_features': 'num_static_categorical_features: int = 0', 'num_static_real_features': 'num_static_real_features: int = 0', 'num_time_features': 'num_time_features: int = 0', 'cardinality': 'cardinality: Optional[list[int
+ ]
+ ] = None', 'embedding_dimension': 'embedding_dimension: Optional[list[int
+ ]
+ ] = None', 'encoder_ffn_dim': 'encoder_ffn_dim: int = 32', 'decoder_ffn_dim': 'decoder_ffn_dim: int = 32', 'encoder_attention_heads': 'encoder_attention_heads: int = 2', 'decoder_attention_heads': 'decoder_attention_heads: int = 2', 'encoder_layers': 'encoder_layers: int = 2', 'decoder_layers': 'decoder_layers: int = 2', 'is_encoder_decoder': 'is_encoder_decoder: bool = True', 'activation_function': "activation_function: str = 'gelu'", 'd_model': 'd_model: int = 64', 'dropout': 'dropout: float = 0.1', 'encoder_layerdrop': 'encoder_layerdrop: float = 0.1', 'decoder_layerdrop': 'decoder_layerdrop: float = 0.1', 'attention_dropout': 'attention_dropout: float = 0.1', 'activation_dropout': 'activation_dropout: float = 0.1', 'num_parallel_samples': 'num_parallel_samples: int = 100', 'init_std': 'init_std: float = 0.02'
+}, model_name='TimeSeriesTransformerModel', library='transformers', import_path='transformers.models.time_series_transformer'), ModelAttributes(model=, model_type='model', model_parameters={'patch_length': 'patch_length: int = 32', 'context_length': 'context_length: int = 512', 'horizon_length': 'horizon_length: int = 128', 'freq_size': 'freq_size: int = 3', 'num_hidden_layers': 'num_hidden_layers: int = 50', 'hidden_size': 'hidden_size: int = 1280', 'intermediate_size': 'intermediate_size: int = 1280', 'head_dim': 'head_dim: int = 80', 'num_attention_heads': 'num_attention_heads: int = 16', 'tolerance': 'tolerance: float = 1e-06', 'rms_norm_eps': 'rms_norm_eps: float = 1e-06', 'quantiles': 'quantiles: list[float
+ ] = [
+ 0.1,
+ 0.2,
+ 0.3,
+ 0.4,
+ 0.5,
+ 0.6,
+ 0.7,
+ 0.8,
+ 0.9
+ ]', 'pad_val': 'pad_val: float = 1123581321.0', 'attention_dropout': 'attention_dropout: float = 0.0', 'use_positional_embedding': 'use_positional_embedding: bool = False', 'initializer_range': 'initializer_range: float = 0.02', 'min_timescale': 'min_timescale: int = 1', 'max_timescale': 'max_timescale: int = 10000'
+}, model_name='TimesFmModel', library='transformers', import_path='transformers.models.timesfm'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'num_frames': 'num_frames=8', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'qkv_bias': 'qkv_bias=True', 'attention_type': "attention_type='divided_space_time'", 'drop_path_rate': 'drop_path_rate=0'
+}, model_name='TimesformerModel', library='transformers', import_path='transformers.models.timesformer'), ModelAttributes(model=, model_type='model', model_parameters={'backbone': 'backbone=None', 'num_channels': 'num_channels=3', 'features_only': 'features_only=True', 'use_pretrained_backbone': 'use_pretrained_backbone=True', 'out_indices': 'out_indices=None', 'freeze_batch_norm_2d': 'freeze_batch_norm_2d=False'
+}, model_name='TimmBackbone', library='transformers', import_path='transformers.models.timm_backbone'), ModelAttributes(model=, model_type='model', model_parameters={'_resnet_': [''
+ ]
+}, model_name='TimmWrapperModel', library='transformers', import_path='transformers.models.timm_wrapper'), ModelAttributes(model=, model_type='model', model_parameters={'backbone_config': 'backbone_config=None', 'backbone': 'backbone=None', 'use_pretrained_backbone': 'use_pretrained_backbone=False', 'use_timm_backbone': 'use_timm_backbone=False', 'backbone_kwargs': 'backbone_kwargs=None', 'distance_loss_weight': 'distance_loss_weight=1.0', 'duration_loss_weight': 'duration_loss_weight=0.1', 'visual_prompter_type': "visual_prompter_type='framepad'", 'visual_prompter_apply': "visual_prompter_apply='replace'", 'visual_prompt_size': 'visual_prompt_size=96', 'max_img_size': 'max_img_size=448', 'num_frames': 'num_frames=48', 'vocab_size': 'vocab_size=30522', 'type_vocab_size': 'type_vocab_size=2', 'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'max_position_embeddings': 'max_position_embeddings=512', 'max_grid_col_position_embeddings': 'max_grid_col_position_embeddings=100', 'max_grid_row_position_embeddings': 'max_grid_row_position_embeddings=100', 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'hidden_act': "hidden_act='gelu'", 'layer_norm_eps': 'layer_norm_eps=1e-12', 'initializer_range': 'initializer_range=0.02', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1'
+}, model_name='TvpModel', library='transformers', import_path='transformers.models.tvp'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=33201', 'd_model': 'd_model=1024', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=4096', 'num_layers': 'num_layers=24', 'num_decoder_layers': 'num_decoder_layers=None', 'num_heads': 'num_heads=16', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'relative_bias_args': "relative_bias_args=[{'type': '1d'}, {'type': 'horizontal'}, {'type': 'vertical'}]", 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_factor': 'initializer_factor=1.0', 'feed_forward_proj': "feed_forward_proj='relu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'max_2d_position_embeddings': 'max_2d_position_embeddings=1024', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3'
+}, model_name='UdopModel', library='transformers', import_path='transformers.models.udop'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'sep_token': "sep_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'sep_token_box': 'sep_token_box=[
+ 1000,
+ 1000,
+ 1000,
+ 1000
+ ]', 'pad_token_box': 'pad_token_box=[
+ 0,
+ 0,
+ 0,
+ 0
+ ]', 'pad_token_label': 'pad_token_label=-100', 'only_label_first_subword': 'only_label_first_subword=True', 'extra_special_tokens': 'extra_special_tokens=None'
+}, model_name='UdopTokenizer', library='transformers', import_path='transformers.models.udop'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=250112', 'd_model': 'd_model=512', 'd_kv': 'd_kv=64', 'd_ff': 'd_ff=1024', 'num_layers': 'num_layers=8', 'num_decoder_layers': 'num_decoder_layers=None', 'num_heads': 'num_heads=6', 'relative_attention_num_buckets': 'relative_attention_num_buckets=32', 'relative_attention_max_distance': 'relative_attention_max_distance=128', 'dropout_rate': 'dropout_rate=0.1', 'layer_norm_epsilon': 'layer_norm_epsilon=1e-06', 'initializer_factor': 'initializer_factor=1.0', 'feed_forward_proj': "feed_forward_proj='gated-gelu'", 'is_encoder_decoder': 'is_encoder_decoder=True', 'tokenizer_class': "tokenizer_class='T5Tokenizer'", 'pad_token_id': 'pad_token_id=0', 'eos_token_id': 'eos_token_id=1', 'decoder_start_token_id': 'decoder_start_token_id=0', 'classifier_dropout': 'classifier_dropout=0.0'
+}, model_name='UMT5Model', library='transformers', import_path='transformers.models.umt5'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'extra_ids': 'extra_ids=100', 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='T5Tokenizer', library='transformers', import_path='transformers.models.t5'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'feat_quantizer_dropout': 'feat_quantizer_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'do_stable_layer_norm': 'do_stable_layer_norm=False', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'num_codevectors_per_group': 'num_codevectors_per_group=320', 'num_codevector_groups': 'num_codevector_groups=2', 'contrastive_logits_temperature': 'contrastive_logits_temperature=0.1', 'num_negatives': 'num_negatives=100', 'codevector_dim': 'codevector_dim=256', 'proj_codevector_dim': 'proj_codevector_dim=256', 'diversity_loss_weight': 'diversity_loss_weight=0.1', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'num_ctc_classes': 'num_ctc_classes=80', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'replace_prob': 'replace_prob=0.5'
+}, model_name='UniSpeechModel', library='transformers', import_path='transformers.models.unispeech'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'feat_quantizer_dropout': 'feat_quantizer_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'do_stable_layer_norm': 'do_stable_layer_norm=False', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'num_codevectors_per_group': 'num_codevectors_per_group=320', 'num_codevector_groups': 'num_codevector_groups=2', 'contrastive_logits_temperature': 'contrastive_logits_temperature=0.1', 'num_negatives': 'num_negatives=100', 'codevector_dim': 'codevector_dim=256', 'proj_codevector_dim': 'proj_codevector_dim=256', 'diversity_loss_weight': 'diversity_loss_weight=0.1', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'num_clusters': 'num_clusters=504'
+}, model_name='UniSpeechSatModel', library='transformers', import_path='transformers.models.unispeech_sat'), ModelAttributes(model=, model_type='model', model_parameters={'model_in_channels': 'model_in_channels=64', 'model_hidden_channels': 'model_hidden_channels=32', 'num_mel_bins': 'num_mel_bins=100', 'resblock_kernel_sizes': 'resblock_kernel_sizes=[
+ 3,
+ 3,
+ 3
+ ]', 'resblock_stride_sizes': 'resblock_stride_sizes=[
+ 8,
+ 8,
+ 4
+ ]', 'resblock_dilation_sizes': 'resblock_dilation_sizes=[
+ [
+ 1,
+ 3,
+ 9,
+ 27
+ ],
+ [
+ 1,
+ 3,
+ 9,
+ 27
+ ],
+ [
+ 1,
+ 3,
+ 9,
+ 27
+ ]
+ ]', 'kernel_predictor_num_blocks': 'kernel_predictor_num_blocks=3', 'kernel_predictor_hidden_channels': 'kernel_predictor_hidden_channels=64', 'kernel_predictor_conv_size': 'kernel_predictor_conv_size=3', 'kernel_predictor_dropout': 'kernel_predictor_dropout=0.0', 'initializer_range': 'initializer_range=0.01', 'leaky_relu_slope': 'leaky_relu_slope=0.2'
+}, model_name='UnivNetModel', library='transformers', import_path='transformers.models.univnet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: Optional[int
+ ] = 256000', 'hidden_size': 'hidden_size: Optional[int
+ ] = 2304', 'intermediate_size': 'intermediate_size: Optional[int
+ ] = 9216', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 26', 'num_attention_heads': 'num_attention_heads: Optional[int
+ ] = 8', 'num_key_value_heads': 'num_key_value_heads: Optional[int
+ ] = 4', 'head_dim': 'head_dim: Optional[int
+ ] = 256', 'hidden_activation': "hidden_activation: Optional[str] = 'gelu_pytorch_tanh'", 'max_position_embeddings': 'max_position_embeddings: Optional[int
+ ] = 8192', 'initializer_range': 'initializer_range: Optional[float
+ ] = 0.02', 'rms_norm_eps': 'rms_norm_eps: Optional[int
+ ] = 1e-06', 'pad_token_id': 'pad_token_id: Optional[int
+ ] = 0', 'eos_token_id': 'eos_token_id: Optional[int
+ ] = 1', 'bos_token_id': 'bos_token_id: Optional[int
+ ] = 2', 'tie_word_embeddings': 'tie_word_embeddings: Optional[bool
+ ] = True', 'rope_parameters': 'rope_parameters: Union[transformers.modeling_rope_utils.RopeParameters, dict[str, transformers.modeling_rope_utils.RopeParameters
+ ], NoneType
+ ] = None', 'attention_bias': 'attention_bias: Optional[bool
+ ] = False', 'attention_dropout': 'attention_dropout: Optional[float
+ ] = 0.0', 'query_pre_attn_scalar': 'query_pre_attn_scalar: Optional[int
+ ] = 256', 'sliding_window': 'sliding_window: Optional[int
+ ] = 4096', 'layer_types': 'layer_types: Optional[list[str
+ ]
+ ] = None', 'final_logit_softcapping': 'final_logit_softcapping: Optional[float
+ ] = 30.0', 'attn_logit_softcapping': 'attn_logit_softcapping: Optional[float
+ ] = 50.0'
+}, model_name='VaultGemmaModel', library='transformers', import_path='transformers.models.vaultgemma'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'image_token_id': 'image_token_id=151655', 'video_token_id': 'video_token_id=151656'
+}, model_name='VideoLlama3Model', library='transformers', import_path='transformers.models.video_llama_3'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'intermediate_size': 'intermediate_size=3072', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'num_channels': 'num_channels=3', 'patch_size': 'patch_size=16', 'hidden_act': "hidden_act='gelu_pytorch_tanh'", 'layer_norm_eps': 'layer_norm_eps=1e-06', 'attention_dropout': 'attention_dropout=0.0', 'initializer_range': 'initializer_range=0.02'
+}, model_name='VideoLlama3VisionModel', library='transformers', import_path='transformers.models.video_llama_3'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=32000', 'video_token_index': 'video_token_index=32001', 'projector_hidden_act': "projector_hidden_act='gelu'", 'vision_feature_select_strategy': "vision_feature_select_strategy='default'", 'vision_feature_layer': 'vision_feature_layer=-2', 'image_seq_length': 'image_seq_length=256', 'video_seq_length': 'video_seq_length=2056', 'multimodal_projector_bias': 'multimodal_projector_bias=True'
+}, model_name='VideoLlavaModel', library='transformers', import_path='transformers.models.video_llava'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'num_frames': 'num_frames=16', 'tubelet_size': 'tubelet_size=2', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'qkv_bias': 'qkv_bias=True', 'use_mean_pooling': 'use_mean_pooling=True', 'decoder_num_attention_heads': 'decoder_num_attention_heads=6', 'decoder_hidden_size': 'decoder_hidden_size=384', 'decoder_num_hidden_layers': 'decoder_num_hidden_layers=4', 'decoder_intermediate_size': 'decoder_intermediate_size=1536', 'norm_pix_loss': 'norm_pix_loss=True'
+}, model_name='VideoMAEModel', library='transformers', import_path='transformers.models.videomae'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'type_vocab_size': 'type_vocab_size=2', 'modality_type_vocab_size': 'modality_type_vocab_size=2', 'max_position_embeddings': 'max_position_embeddings=40', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=384', 'patch_size': 'patch_size=32', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'max_image_length': 'max_image_length=-1', 'tie_word_embeddings': 'tie_word_embeddings=True', 'num_images': 'num_images=-1'
+}, model_name='ViltModel', library='transformers', import_path='transformers.models.vilt'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'vision_config': 'vision_config=None', 'text_config': 'text_config=None', 'image_token_index': 'image_token_index=32000', 'projector_hidden_act': "projector_hidden_act='gelu'", 'projector_layernorm_eps': 'projector_layernorm_eps=1e-05', 'vision_feature_layers': 'vision_feature_layers=[
+ -2,
+ -5,
+ -8,
+ -11,
+ 6
+ ]', 'image_seq_length': 'image_seq_length=576'
+}, model_name='VipLlavaModel', library='transformers', import_path='transformers.models.vipllava'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'projection_dim': 'projection_dim=512', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='VisionTextDualEncoderModel', library='transformers', import_path='transformers.models.vision_text_dual_encoder'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'visual_embedding_dim': 'visual_embedding_dim=512', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'bypass_transformer': 'bypass_transformer=False', 'special_visual_initialize': 'special_visual_initialize=True', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='VisualBertModel', library='transformers', import_path='transformers.models.visual_bert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'do_lower_case': 'do_lower_case: bool = False', 'unk_token': "unk_token: str = '[UNK]'", 'sep_token': "sep_token: str = '[SEP]'", 'pad_token': "pad_token: str = '[PAD]'", 'cls_token': "cls_token: str = '[CLS]'", 'mask_token': "mask_token: str = '[MASK]'", 'tokenize_chinese_chars': 'tokenize_chinese_chars: bool = True', 'strip_accents': 'strip_accents: Optional[bool
+ ] = None'
+}, model_name='BertTokenizer', library='transformers', import_path='transformers.models.bert'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'encoder_stride': 'encoder_stride=16', 'pooler_output_size': 'pooler_output_size=None', 'pooler_act': "pooler_act='tanh'"
+}, model_name='ViTModel', library='transformers', import_path='transformers.models.vit'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'decoder_num_attention_heads': 'decoder_num_attention_heads=16', 'decoder_hidden_size': 'decoder_hidden_size=512', 'decoder_num_hidden_layers': 'decoder_num_hidden_layers=8', 'decoder_intermediate_size': 'decoder_intermediate_size=2048', 'mask_ratio': 'mask_ratio=0.75', 'norm_pix_loss': 'norm_pix_loss=False'
+}, model_name='ViTMAEModel', library='transformers', import_path='transformers.models.vit_mae'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True'
+}, model_name='ViTMSNModel', library='transformers', import_path='transformers.models.vit_msn'), ModelAttributes(model=, model_type='model', model_parameters={'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'mlp_ratio': 'mlp_ratio=4', 'hidden_act': "hidden_act='gelu'", 'dropout_prob': 'dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'image_size': 'image_size=224', 'pretrain_image_size': 'pretrain_image_size=224', 'patch_size': 'patch_size=16', 'num_channels': 'num_channels=3', 'qkv_bias': 'qkv_bias=True', 'drop_path_rate': 'drop_path_rate=0.0', 'window_block_indices': 'window_block_indices=[]', 'residual_block_indices': 'residual_block_indices=[]', 'use_absolute_position_embeddings': 'use_absolute_position_embeddings=True', 'use_relative_position_embeddings': 'use_relative_position_embeddings=False', 'window_size': 'window_size=0', 'out_features': 'out_features=None', 'out_indices': 'out_indices=None'
+}, model_name='VitDetModel', library='transformers', import_path='transformers.models.vitdet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=38', 'hidden_size': 'hidden_size=192', 'num_hidden_layers': 'num_hidden_layers=6', 'num_attention_heads': 'num_attention_heads=2', 'window_size': 'window_size=4', 'use_bias': 'use_bias=True', 'ffn_dim': 'ffn_dim=768', 'layerdrop': 'layerdrop=0.1', 'ffn_kernel_size': 'ffn_kernel_size=3', 'flow_size': 'flow_size=192', 'spectrogram_bins': 'spectrogram_bins=513', 'hidden_act': "hidden_act='relu'", 'hidden_dropout': 'hidden_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'use_stochastic_duration_prediction': 'use_stochastic_duration_prediction=True', 'num_speakers': 'num_speakers=1', 'speaker_embedding_size': 'speaker_embedding_size=0', 'upsample_initial_channel': 'upsample_initial_channel=512', 'upsample_rates': 'upsample_rates=[
+ 8,
+ 8,
+ 2,
+ 2
+ ]', 'upsample_kernel_sizes': 'upsample_kernel_sizes=[
+ 16,
+ 16,
+ 4,
+ 4
+ ]', 'resblock_kernel_sizes': 'resblock_kernel_sizes=[
+ 3,
+ 7,
+ 11
+ ]', 'resblock_dilation_sizes': 'resblock_dilation_sizes=[
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ],
+ [
+ 1,
+ 3,
+ 5
+ ]
+ ]', 'leaky_relu_slope': 'leaky_relu_slope=0.1', 'depth_separable_channels': 'depth_separable_channels=2', 'depth_separable_num_layers': 'depth_separable_num_layers=3', 'duration_predictor_flow_bins': 'duration_predictor_flow_bins=10', 'duration_predictor_tail_bound': 'duration_predictor_tail_bound=5.0', 'duration_predictor_kernel_size': 'duration_predictor_kernel_size=3', 'duration_predictor_dropout': 'duration_predictor_dropout=0.5', 'duration_predictor_num_flows': 'duration_predictor_num_flows=4', 'duration_predictor_filter_channels': 'duration_predictor_filter_channels=256', 'prior_encoder_num_flows': 'prior_encoder_num_flows=4', 'prior_encoder_num_wavenet_layers': 'prior_encoder_num_wavenet_layers=4', 'posterior_encoder_num_wavenet_layers': 'posterior_encoder_num_wavenet_layers=16', 'wavenet_kernel_size': 'wavenet_kernel_size=5', 'wavenet_dilation_rate': 'wavenet_dilation_rate=1', 'wavenet_dropout': 'wavenet_dropout=0.0', 'speaking_rate': 'speaking_rate=1.0', 'noise_scale': 'noise_scale=0.667', 'noise_scale_duration': 'noise_scale_duration=0.8', 'sampling_rate': 'sampling_rate=16000'
+}, model_name='VitsModel', library='transformers', import_path='transformers.models.vits'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'pad_token': "pad_token=''", 'unk_token': "unk_token=''", 'language': 'language=None', 'add_blank': 'add_blank=True', 'normalize': 'normalize=True', 'phonemize': 'phonemize=True', 'is_uroman': 'is_uroman=False'
+}, model_name='VitsTokenizer', library='transformers', import_path='transformers.models.vits'), ModelAttributes(model=, model_type='model', model_parameters={'image_size': 'image_size=224', 'num_frames': 'num_frames=32', 'tubelet_size': 'tubelet_size=[
+ 2,
+ 16,
+ 16
+ ]', 'num_channels': 'num_channels=3', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu_fast'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.0', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'qkv_bias': 'qkv_bias=True'
+}, model_name='VivitModel', library='transformers', import_path='transformers.models.vivit'), ModelAttributes(model=, model_type='model', model_parameters={'patch_size': 'patch_size=16', 'crop_size': 'crop_size=256', 'frames_per_clip': 'frames_per_clip=64', 'tubelet_size': 'tubelet_size=2', 'hidden_size': 'hidden_size=1024', 'in_chans': 'in_chans=3', 'num_attention_heads': 'num_attention_heads=16', 'num_hidden_layers': 'num_hidden_layers=24', 'drop_path_rate': 'drop_path_rate=0.0', 'mlp_ratio': 'mlp_ratio=4.0', 'layer_norm_eps': 'layer_norm_eps=1e-06', 'qkv_bias': 'qkv_bias=True', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.0', 'hidden_act': "hidden_act='gelu'", 'initializer_range': 'initializer_range=0.02', 'attention_dropout': 'attention_dropout=0.0', 'num_pooler_layers': 'num_pooler_layers=3', 'pred_hidden_size': 'pred_hidden_size=384', 'pred_num_attention_heads': 'pred_num_attention_heads=12', 'pred_num_hidden_layers': 'pred_num_hidden_layers=12', 'pred_num_mask_tokens': 'pred_num_mask_tokens=10', 'pred_zero_init_mask_tokens': 'pred_zero_init_mask_tokens=True', 'pred_mlp_ratio': 'pred_mlp_ratio=4.0'
+}, model_name='VJEPA2Model', library='transformers', import_path='transformers.models.vjepa2'), ModelAttributes(model=, model_type='model', model_parameters={'audio_config': 'audio_config=None', 'text_config': 'text_config=None', 'audio_token_id': 'audio_token_id=None', 'projector_hidden_act': "projector_hidden_act='gelu'"
+}, model_name='VoxtralForConditionalGeneration', library='transformers', import_path='transformers.models.voxtral'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict, list, NoneType
+ ] = None', 'merges': 'merges: Union[str, list, NoneType
+ ] = None', 'clean_up_tokenization_spaces': 'clean_up_tokenization_spaces=False', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'use_default_system_prompt': 'use_default_system_prompt=False', 'legacy': 'legacy=False', 'add_prefix_space': 'add_prefix_space=None'
+}, model_name='LlamaTokenizer', library='transformers', import_path='transformers.models.llama'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=51866', 'hidden_size': 'hidden_size=1280', 'intermediate_size': 'intermediate_size=5120', 'num_hidden_layers': 'num_hidden_layers=32', 'num_attention_heads': 'num_attention_heads=20', 'scale_embedding': 'scale_embedding=False', 'activation_function': "activation_function='gelu'", 'num_mel_bins': 'num_mel_bins=128', 'max_source_positions': 'max_source_positions=1500', 'initializer_range': 'initializer_range=0.02', 'attention_dropout': 'attention_dropout=0.0'
+}, model_name='VoxtralEncoder', library='transformers', import_path='transformers.models.voxtral'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'feat_quantizer_dropout': 'feat_quantizer_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'do_stable_layer_norm': 'do_stable_layer_norm=False', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'num_codevectors_per_group': 'num_codevectors_per_group=320', 'num_codevector_groups': 'num_codevector_groups=2', 'contrastive_logits_temperature': 'contrastive_logits_temperature=0.1', 'num_negatives': 'num_negatives=100', 'codevector_dim': 'codevector_dim=256', 'proj_codevector_dim': 'proj_codevector_dim=256', 'diversity_loss_weight': 'diversity_loss_weight=0.1', 'ctc_loss_reduction': "ctc_loss_reduction='sum'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'add_adapter': 'add_adapter=False', 'adapter_kernel_size': 'adapter_kernel_size=3', 'adapter_stride': 'adapter_stride=2', 'num_adapter_layers': 'num_adapter_layers=3', 'output_hidden_size': 'output_hidden_size=None', 'adapter_attn_dim': 'adapter_attn_dim=None'
+}, model_name='Wav2Vec2Model', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'word_delimiter_token': "word_delimiter_token='|'", 'replace_word_delimiter_char': "replace_word_delimiter_char=' '", 'do_lower_case': 'do_lower_case=False', 'target_lang': 'target_lang=None'
+}, model_name='Wav2Vec2CTCTokenizer', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=None', 'hidden_size': 'hidden_size=1024', 'num_hidden_layers': 'num_hidden_layers=24', 'num_attention_heads': 'num_attention_heads=16', 'intermediate_size': 'intermediate_size=4096', 'feature_projection_input_dim': 'feature_projection_input_dim=160', 'hidden_act': "hidden_act='swish'", 'hidden_dropout': 'hidden_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'attention_dropout': 'attention_dropout=0.0', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'ctc_loss_reduction': "ctc_loss_reduction='sum'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=768', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'add_adapter': 'add_adapter=False', 'adapter_kernel_size': 'adapter_kernel_size=3', 'adapter_stride': 'adapter_stride=2', 'num_adapter_layers': 'num_adapter_layers=1', 'adapter_act': "adapter_act='relu'", 'use_intermediate_ffn_before_adapter': 'use_intermediate_ffn_before_adapter=False', 'output_hidden_size': 'output_hidden_size=None', 'position_embeddings_type': "position_embeddings_type='relative_key'", 'rotary_embedding_base': 'rotary_embedding_base=10000', 'max_source_positions': 'max_source_positions=5000', 'left_max_position_embeddings': 'left_max_position_embeddings=64', 'right_max_position_embeddings': 'right_max_position_embeddings=8', 'conv_depthwise_kernel_size': 'conv_depthwise_kernel_size=31', 'conformer_conv_dropout': 'conformer_conv_dropout=0.1'
+}, model_name='Wav2Vec2BertModel', library='transformers', import_path='transformers.models.wav2vec2_bert'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'word_delimiter_token': "word_delimiter_token='|'", 'replace_word_delimiter_char': "replace_word_delimiter_char=' '", 'do_lower_case': 'do_lower_case=False', 'target_lang': 'target_lang=None'
+}, model_name='Wav2Vec2CTCTokenizer', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=None', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'feat_quantizer_dropout': 'feat_quantizer_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'num_codevectors_per_group': 'num_codevectors_per_group=320', 'num_codevector_groups': 'num_codevector_groups=2', 'contrastive_logits_temperature': 'contrastive_logits_temperature=0.1', 'num_negatives': 'num_negatives=100', 'codevector_dim': 'codevector_dim=256', 'proj_codevector_dim': 'proj_codevector_dim=256', 'diversity_loss_weight': 'diversity_loss_weight=0.1', 'ctc_loss_reduction': "ctc_loss_reduction='sum'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'add_adapter': 'add_adapter=False', 'adapter_kernel_size': 'adapter_kernel_size=3', 'adapter_stride': 'adapter_stride=2', 'num_adapter_layers': 'num_adapter_layers=3', 'output_hidden_size': 'output_hidden_size=None', 'position_embeddings_type': "position_embeddings_type='relative'", 'rotary_embedding_base': 'rotary_embedding_base=10000', 'max_source_positions': 'max_source_positions=5000', 'conv_depthwise_kernel_size': 'conv_depthwise_kernel_size=31', 'conformer_conv_dropout': 'conformer_conv_dropout=0.1'
+}, model_name='Wav2Vec2ConformerModel', library='transformers', import_path='transformers.models.wav2vec2_conformer'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'pad_token': "pad_token=''", 'word_delimiter_token': "word_delimiter_token='|'", 'replace_word_delimiter_char': "replace_word_delimiter_char=' '", 'do_lower_case': 'do_lower_case=False', 'target_lang': 'target_lang=None'
+}, model_name='Wav2Vec2CTCTokenizer', library='transformers', import_path='transformers.models.wav2vec2'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout': 'hidden_dropout=0.1', 'activation_dropout': 'activation_dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'feat_proj_dropout': 'feat_proj_dropout=0.0', 'final_dropout': 'final_dropout=0.1', 'layerdrop': 'layerdrop=0.1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'feat_extract_norm': "feat_extract_norm='group'", 'feat_extract_activation': "feat_extract_activation='gelu'", 'conv_dim': 'conv_dim=(512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512)', 'conv_stride': 'conv_stride=(5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2)', 'conv_kernel': 'conv_kernel=(10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2)', 'conv_bias': 'conv_bias=False', 'num_conv_pos_embeddings': 'num_conv_pos_embeddings=128', 'num_conv_pos_embedding_groups': 'num_conv_pos_embedding_groups=16', 'num_buckets': 'num_buckets=320', 'max_bucket_distance': 'max_bucket_distance=800', 'do_stable_layer_norm': 'do_stable_layer_norm=False', 'apply_spec_augment': 'apply_spec_augment=True', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'num_codevectors_per_group': 'num_codevectors_per_group=320', 'num_codevector_groups': 'num_codevector_groups=2', 'contrastive_logits_temperature': 'contrastive_logits_temperature=0.1', 'num_negatives': 'num_negatives=100', 'codevector_dim': 'codevector_dim=256', 'proj_codevector_dim': 'proj_codevector_dim=256', 'diversity_loss_weight': 'diversity_loss_weight=0.1', 'ctc_loss_reduction': "ctc_loss_reduction='mean'", 'ctc_zero_infinity': 'ctc_zero_infinity=False', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'tdnn_dim': 'tdnn_dim=(512,
+ 512,
+ 512,
+ 512,
+ 1500)', 'tdnn_kernel': 'tdnn_kernel=(5,
+ 3,
+ 3,
+ 1,
+ 1)', 'tdnn_dilation': 'tdnn_dilation=(1,
+ 2,
+ 3,
+ 1,
+ 1)', 'xvector_output_dim': 'xvector_output_dim=512', 'num_ctc_classes': 'num_ctc_classes=80', 'pad_token_id': 'pad_token_id=0', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2', 'add_adapter': 'add_adapter=False', 'adapter_kernel_size': 'adapter_kernel_size=3', 'adapter_stride': 'adapter_stride=2', 'num_adapter_layers': 'num_adapter_layers=3', 'output_hidden_size': 'output_hidden_size=None'
+}, model_name='WavLMModel', library='transformers', import_path='transformers.models.wavlm'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=51865', 'num_mel_bins': 'num_mel_bins=80', 'encoder_layers': 'encoder_layers=4', 'encoder_attention_heads': 'encoder_attention_heads=6', 'decoder_layers': 'decoder_layers=4', 'decoder_attention_heads': 'decoder_attention_heads=6', 'decoder_ffn_dim': 'decoder_ffn_dim=1536', 'encoder_ffn_dim': 'encoder_ffn_dim=1536', 'encoder_layerdrop': 'encoder_layerdrop=0.0', 'decoder_layerdrop': 'decoder_layerdrop=0.0', 'decoder_start_token_id': 'decoder_start_token_id=50257', 'is_encoder_decoder': 'is_encoder_decoder=True', 'activation_function': "activation_function='gelu'", 'd_model': 'd_model=384', 'dropout': 'dropout=0.0', 'attention_dropout': 'attention_dropout=0.0', 'activation_dropout': 'activation_dropout=0.0', 'init_std': 'init_std=0.02', 'scale_embedding': 'scale_embedding=False', 'max_source_positions': 'max_source_positions=1500', 'max_target_positions': 'max_target_positions=448', 'pad_token_id': 'pad_token_id=50256', 'bos_token_id': 'bos_token_id=50256', 'eos_token_id': 'eos_token_id=50256', 'suppress_tokens': 'suppress_tokens=None', 'begin_suppress_tokens': 'begin_suppress_tokens=[
+ 220,
+ 50256
+ ]', 'use_weighted_layer_sum': 'use_weighted_layer_sum=False', 'classifier_proj_size': 'classifier_proj_size=256', 'apply_spec_augment': 'apply_spec_augment=False', 'mask_time_prob': 'mask_time_prob=0.05', 'mask_time_length': 'mask_time_length=10', 'mask_time_min_masks': 'mask_time_min_masks=2', 'mask_feature_prob': 'mask_feature_prob=0.0', 'mask_feature_length': 'mask_feature_length=10', 'mask_feature_min_masks': 'mask_feature_min_masks=0', 'median_filter_width': 'median_filter_width=7'
+}, model_name='WhisperModel', library='transformers', import_path='transformers.models.whisper'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges=None', 'normalizer_file': 'normalizer_file=None', 'unk_token': "unk_token='<|endoftext|>'", 'bos_token': "bos_token='<|endoftext|>'", 'eos_token': "eos_token='<|endoftext|>'", 'add_prefix_space': 'add_prefix_space=False', 'language': 'language=None', 'task': 'task=None', 'predict_timestamps': 'predict_timestamps=False'
+}, model_name='WhisperTokenizer', library='transformers', import_path='transformers.models.whisper'), ModelAttributes(model=, model_type='model', model_parameters={'text_config': 'text_config=None', 'vision_config': 'vision_config=None', 'projection_dim': 'projection_dim=512', 'prompt_layers': 'prompt_layers=2', 'prompt_alpha': 'prompt_alpha=0.1', 'prompt_hidden_act': "prompt_hidden_act='quick_gelu'", 'prompt_num_attention_heads': 'prompt_num_attention_heads=8', 'prompt_attention_dropout': 'prompt_attention_dropout=0.0', 'prompt_projection_dropout': 'prompt_projection_dropout=0.0', 'logit_scale_init_value': 'logit_scale_init_value=2.6592'
+}, model_name='XCLIPModel', library='transformers', import_path='transformers.models.x_clip'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|startoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|endoftext|>'"
+}, model_name='CLIPTokenizer', library='transformers', import_path='transformers.models.clip'), ModelAttributes(model=, model_type='model', model_parameters={'target_bandwidths': 'target_bandwidths: Optional[list[float
+ ]
+ ] = None', 'sample_rate': 'sample_rate: int = 16000', 'kernel_size': 'kernel_size: int = 3', 'channel_ratios': 'channel_ratios: list[float
+ ] = [
+ 1,
+ 1
+ ]', 'strides': 'strides: list[int
+ ] = [
+ 1,
+ 1
+ ]', 'block_dilations': 'block_dilations: list[int
+ ] = [
+ 1,
+ 1
+ ]', 'unit_kernel_size': 'unit_kernel_size: int = 3', 'codebook_size': 'codebook_size: int = 1024', 'codebook_dim': 'codebook_dim: Optional[int
+ ] = None', 'initializer_range': 'initializer_range: float = 0.02', 'acoustic_model_config': 'acoustic_model_config: Union[dict, transformers.models.dac.configuration_dac.DacConfig, NoneType
+ ] = None', 'semantic_model_config': 'semantic_model_config: Union[dict, transformers.models.hubert.configuration_hubert.HubertConfig, NoneType
+ ] = None'
+}, model_name='XcodecModel', library='transformers', import_path='transformers.models.xcodec'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=256008', 'max_position_embeddings': 'max_position_embeddings=2048', 'd_model': 'd_model=1024', 'ffn_dim': 'ffn_dim=4096', 'num_layers': 'num_layers=24', 'attention_heads': 'attention_heads=16', 'activation_function': "activation_function='gelu'", 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'activation_dropout': 'activation_dropout=0.0', 'layerdrop': 'layerdrop=0.0', 'init_std': 'init_std=0.02', 'scale_embedding': 'scale_embedding=True', 'decoder_start_token_id': 'decoder_start_token_id=2', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2'
+}, model_name='XGLMModel', library='transformers', import_path='transformers.models.xglm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'add_prefix_space': 'add_prefix_space: bool = True'
+}, model_name='XGLMTokenizer', library='transformers', import_path='transformers.models.xglm'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30145', 'emb_dim': 'emb_dim=2048', 'n_layers': 'n_layers=12', 'n_heads': 'n_heads=16', 'dropout': 'dropout=0.1', 'attention_dropout': 'attention_dropout=0.1', 'gelu_activation': 'gelu_activation=True', 'sinusoidal_embeddings': 'sinusoidal_embeddings=False', 'causal': 'causal=False', 'asm': 'asm=False', 'n_langs': 'n_langs=1', 'use_lang_emb': 'use_lang_emb=True', 'max_position_embeddings': 'max_position_embeddings=512', 'embed_init_std': 'embed_init_std=0.02209708691207961', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'init_std': 'init_std=0.02', 'bos_index': 'bos_index=0', 'eos_index': 'eos_index=1', 'pad_index': 'pad_index=2', 'unk_index': 'unk_index=3', 'mask_index': 'mask_index=5', 'is_encoder': 'is_encoder=True', 'summary_type': "summary_type='first'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': 'summary_activation=None', 'summary_proj_to_labels': 'summary_proj_to_labels=True', 'summary_first_dropout': 'summary_first_dropout=0.1', 'start_n_top': 'start_n_top=5', 'end_n_top': 'end_n_top=5', 'mask_token_id': 'mask_token_id=0', 'lang_id': 'lang_id=0', 'pad_token_id': 'pad_token_id=2', 'bos_token_id': 'bos_token_id=0'
+}, model_name='XLMModel', library='transformers', import_path='transformers.models.xlm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab_file': 'vocab_file', 'merges_file': 'merges_file', 'unk_token': "unk_token=''", 'bos_token': "bos_token=''", 'sep_token': "sep_token=''", 'pad_token': "pad_token=''", 'cls_token': "cls_token=''", 'mask_token': "mask_token=''", 'additional_special_tokens': "additional_special_tokens=['', '', '', '', '', '', '', '', '', '']", 'lang2id': 'lang2id=None', 'id2lang': 'id2lang=None', 'do_lowercase_and_remove_accent': 'do_lowercase_and_remove_accent=True'
+}, model_name='XLMTokenizer', library='transformers', import_path='transformers.models.xlm'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='XLMRobertaModel', library='transformers', import_path='transformers.models.xlm_roberta'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space: bool = True', 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='XLMRobertaTokenizer', library='transformers', import_path='transformers.models.xlm_roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=250880', 'hidden_size': 'hidden_size=2560', 'num_hidden_layers': 'num_hidden_layers=36', 'num_attention_heads': 'num_attention_heads=32', 'intermediate_size': 'intermediate_size=10240', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=514', 'type_vocab_size': 'type_vocab_size=1', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-05', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None'
+}, model_name='XLMRobertaXLModel', library='transformers', import_path='transformers.models.xlm_roberta_xl'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space: bool = True', 'bos_token': "bos_token: str = ''", 'eos_token': "eos_token: str = ''", 'sep_token': "sep_token: str = ''", 'cls_token': "cls_token: str = ''", 'unk_token': "unk_token: str = ''", 'pad_token': "pad_token: str = ''", 'mask_token': "mask_token: str = ''"
+}, model_name='XLMRobertaTokenizer', library='transformers', import_path='transformers.models.xlm_roberta'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=32000', 'd_model': 'd_model=1024', 'n_layer': 'n_layer=24', 'n_head': 'n_head=16', 'd_inner': 'd_inner=4096', 'ff_activation': "ff_activation='gelu'", 'attn_type': "attn_type='bi'", 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'dropout': 'dropout=0.1', 'mem_len': 'mem_len=512', 'reuse_len': 'reuse_len=None', 'use_mems_eval': 'use_mems_eval=True', 'use_mems_train': 'use_mems_train=False', 'bi_data': 'bi_data=False', 'clamp_len': 'clamp_len=-1', 'same_length': 'same_length=False', 'summary_type': "summary_type='last'", 'summary_use_proj': 'summary_use_proj=True', 'summary_activation': "summary_activation='tanh'", 'summary_last_dropout': 'summary_last_dropout=0.1', 'start_n_top': 'start_n_top=5', 'end_n_top': 'end_n_top=5', 'pad_token_id': 'pad_token_id=5', 'bos_token_id': 'bos_token_id=1', 'eos_token_id': 'eos_token_id=2'
+}, model_name='XLNetModel', library='transformers', import_path='transformers.models.xlnet'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'unk_id': 'unk_id: int = 0', 'do_lower_case': 'do_lower_case=False', 'remove_space': 'remove_space=True', 'keep_accents': 'keep_accents=False', 'bos_token': "bos_token=''", 'eos_token': "eos_token=''", 'unk_token': "unk_token=''", 'sep_token': "sep_token=''", 'pad_token': "pad_token=''", 'cls_token': "cls_token=''", 'mask_token': "mask_token=''", 'additional_special_tokens': 'additional_special_tokens=None'
+}, model_name='XLNetTokenizer', library='transformers', import_path='transformers.models.xlnet'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size: int = 50304', 'hidden_size': 'hidden_size: int = 4096', 'embedding_dim': 'embedding_dim: Optional[int
+ ] = None', 'num_hidden_layers': 'num_hidden_layers: Optional[int
+ ] = 32', 'num_blocks': 'num_blocks: Optional[int
+ ] = None', 'num_heads': 'num_heads: int = 8', 'use_bias': 'use_bias: bool = False', 'norm_reduction_force_float32': 'norm_reduction_force_float32: bool = True', 'tie_word_embeddings': 'tie_word_embeddings: bool = False', 'add_out_norm': 'add_out_norm: bool = True', 'norm_eps': 'norm_eps: float = 1e-06', 'qk_dim_factor': 'qk_dim_factor: float = 0.5', 'v_dim_factor': 'v_dim_factor: float = 1.0', 'chunkwise_kernel': "chunkwise_kernel: Literal['chunkwise--native_autograd', 'parallel--native_autograd'] = 'chunkwise--native_autograd'", 'sequence_kernel': "sequence_kernel: Literal['native_sequence__native'] = 'native_sequence__native'", 'step_kernel': "step_kernel: Literal['native'] = 'native'", 'mode': "mode: Literal['train', 'train_with_padding', 'inference'] = 'inference'", 'chunk_size': 'chunk_size: int = 64', 'return_last_states': 'return_last_states: bool = True', 'autocast_kernel_dtype': "autocast_kernel_dtype: Literal['float32', 'bfloat16', 'float16'] = 'bfloat16'", 'eps': 'eps: float = 1e-06', 'inference_state_dtype': "inference_state_dtype: Literal['float32', 'bfloat16', 'float16'] = 'float32'", 'ffn_proj_factor': 'ffn_proj_factor: float = 2.667', 'ffn_round_up_to_multiple_of': 'ffn_round_up_to_multiple_of: int = 64', 'gate_soft_cap': 'gate_soft_cap: float = 15.0', 'output_logit_soft_cap': 'output_logit_soft_cap: float = 30.0', 'weight_mode': "weight_mode: Literal['single', 'fused'] = 'single'", 'pad_token_id': 'pad_token_id: int = 1', 'bos_token_id': 'bos_token_id: int = 0', 'eos_token_id': 'eos_token_id: int = 2', 'max_inference_chunksize': 'max_inference_chunksize: int = 16384'
+}, model_name='xLSTMModel', library='transformers', import_path='transformers.models.xlstm'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, dict[str, int
+ ], NoneType
+ ] = None', 'merges': 'merges: Union[str, list[str
+ ], NoneType
+ ] = None', 'errors': "errors: str = 'replace'", 'unk_token': "unk_token: str = '<|endoftext|>'", 'bos_token': "bos_token: str = '<|endoftext|>'", 'eos_token': "eos_token: str = '<|endoftext|>'", 'pad_token': "pad_token: str = '<|padding|>'", 'add_prefix_space': 'add_prefix_space: bool = False', 'trim_offsets': 'trim_offsets: bool = True'
+}, model_name='GPTNeoXTokenizer', library='transformers', import_path='transformers.models.gpt_neox'), ModelAttributes(model=, model_type='model', model_parameters={'vocab_size': 'vocab_size=30522', 'hidden_size': 'hidden_size=768', 'num_hidden_layers': 'num_hidden_layers=12', 'num_attention_heads': 'num_attention_heads=12', 'intermediate_size': 'intermediate_size=3072', 'hidden_act': "hidden_act='gelu'", 'hidden_dropout_prob': 'hidden_dropout_prob=0.1', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob=0.1', 'max_position_embeddings': 'max_position_embeddings=512', 'type_vocab_size': 'type_vocab_size=2', 'initializer_range': 'initializer_range=0.02', 'layer_norm_eps': 'layer_norm_eps=1e-12', 'pad_token_id': 'pad_token_id=1', 'bos_token_id': 'bos_token_id=0', 'eos_token_id': 'eos_token_id=2', 'classifier_dropout': 'classifier_dropout=None', 'pre_norm': 'pre_norm=False', 'adapter_reduction_factor': 'adapter_reduction_factor=2', 'adapter_layer_norm': 'adapter_layer_norm=False', 'adapter_reuse_layer_norm': 'adapter_reuse_layer_norm=True', 'ln_before_adapter': 'ln_before_adapter=True', 'languages': "languages=('en_XX',)", 'default_language': 'default_language=None'
+}, model_name='XmodModel', library='transformers', import_path='transformers.models.xmod'), ModelAttributes(model=, model_type='tokenizer', model_parameters={'vocab': 'vocab: Union[str, list[tuple[str, float
+ ]
+ ], NoneType
+ ] = None', 'add_prefix_space': 'add_prefix_space: bool = True', 'bos_token': "bos_token: str = '