Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
39 commits
Select commit Hold shift + click to select a range
a90f0b3
Update pyproject.toml, bump python version, update README
FWao Jul 18, 2025
539f556
Separate gpu and cpu dependencies
FWao Jul 18, 2025
a7e3b6d
Add uni to no-build-isolation
FWao Jul 18, 2025
8aa47e8
update uv.lock
FWao Jul 18, 2025
e721c74
Update Uni source
FWao Jul 18, 2025
50943d0
Update README, update uv.lock
FWao Jul 18, 2025
3cbd68b
Update README
FWao Jul 18, 2025
1bd7410
Add torch index
FWao Jul 18, 2025
c167c19
Add flash-attention dep group to no-build-isolation
FWao Jul 18, 2025
35bd5f7
Remove torch index
FWao Jul 18, 2025
a62ed79
Revert flash-attention optional dependency group
FWao Jul 18, 2025
ff2b2d9
Revert conflicts
FWao Jul 18, 2025
3242a88
Change CONCH source
FWao Jul 18, 2025
682c032
re-add flash attention group
FWao Jul 18, 2025
d80716c
README: add --no-build-isolation
FWao Jul 18, 2025
52a9db2
Add triton constraint to build to avoid downgrading torch during buil…
FWao Jul 18, 2025
159e770
Update README, add fairscale (musk) dependency
FWao Jul 18, 2025
c7b0339
Remove README copy
FWao Jul 18, 2025
878e729
Fix max text chunk error in PNG metadata
georg-wolflein May 27, 2025
c482b62
add extension note
EzicStar May 28, 2025
088707b
forgot to add it in other fields
EzicStar May 28, 2025
d23a0d5
remove att masking on valid, test and pred
EzicStar Jun 23, 2025
253e39e
fix compatibility and format
EzicStar Jun 24, 2025
bddd951
add formatting
EzicStar Jun 25, 2025
a70577f
update docs
EzicStar Jul 1, 2025
3dc2451
Separate gpu and cpu dependencies
FWao Jul 18, 2025
e20583a
Add uni to no-build-isolation
FWao Jul 18, 2025
27783c4
Add torch index
FWao Jul 18, 2025
4c47330
Remove torch index
FWao Jul 18, 2025
95bb98a
Revert flash-attention optional dependency group
FWao Jul 18, 2025
5ad25bb
Revert conflicts
FWao Jul 18, 2025
8fda1a9
re-add flash attention group
FWao Jul 18, 2025
54952ff
Fix pyproject.toml
FWao Jul 18, 2025
f6423a8
Update README / getting started
FWao Jul 18, 2025
93181da
Update README
FWao Jul 18, 2025
2d48c0d
Remove README copy
FWao Jul 18, 2025
2f4d24c
Merge branch 'main' into fix/build
FWao Jul 18, 2025
46d4dbe
Update github build workflow
FWao Jul 18, 2025
fbc4dd7
Update README
FWao Jul 21, 2025
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ jobs:
# python-version: ${{ matrix.python-version }}

- name: Install the project
run: uv sync --extra all --dev
run: uv sync --extra cpu --dev

- name: Build
run: uv build
Expand Down Expand Up @@ -77,7 +77,7 @@ jobs:
# python-version: ${{ matrix.python-version }}

- name: Install the project
run: uv sync --extra all --dev
run: uv sync --extra cpu --dev

- name: Build
run: uv build
Expand Down
2 changes: 1 addition & 1 deletion .python-version
Original file line number Diff line number Diff line change
@@ -1 +1 @@
3.11
3.12
105 changes: 98 additions & 7 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,32 +16,123 @@ A Protocol for End-to-End Deep Learning in Computational Pathology".
[stamp paper]: https://www.nature.com/articles/s41596-024-01047-2 "From whole-slide image to biomarker prediction: end-to-end weakly supervised deep learning in computational pathology"
[stamp v1]: https://github.com/KatherLab/STAMP/tree/v1

## Installing stamp
## Installation

We recommend installing STAMP with [uv](https://docs.astral.sh/uv/):

### Install or Update uv:

```bash
# Install uv
curl -LsSf https://astral.sh/uv/install.sh | sh

# Update uv
uv self update
```

### Install STAMP in a Virtual Environment:

```bash
uv venv --python=3.12
source .venv/bin/activate

# For a CPU-only installation:
uv pip install "git+https://github.com/KatherLab/STAMP.git[cpu]" --torch-backend=cpu

# For a GPU (CUDA) installation:
uv pip install "git+https://github.com/KatherLab/STAMP.git[build]"
uv pip install "git+https://github.com/KatherLab/STAMP.git[build,gpu]" --no-build-isolation

# Note: Run these two commands in order — the build dependencies must be installed first!
```

### Install STAMP from the Repository:

```bash
git clone https://github.com/KatherLab/STAMP.git
cd STAMP
```

cd STAMP/

uv sync --all-extras
```bash
# CPU-only Installation (excluding COBRA, Gigapath (and flash-attn))

uv sync --extra cpu
source .venv/bin/activate
```

```bash
# GPU (CUDA) Installation (Using flash-attn on CUDA systems for gigapath and other models)

# First run this!!
uv sync --extra build

# And then this for all models:
uv sync --extra build --extra gpu

# Alternatively, you can install only a specific model:
uv sync --extra build --extra uni


# In case building flash-attn uses too much memory, you can limit the number of parallel compilation jobs:
MAX_JOBS=4 uv sync --extra build --extra gpu
```

### Additional Dependencies

> [!IMPORTANT]
> STAMP additionally requires OpenSlide to be installed, as well as OpenCV dependencies.
> STAMP additionally requires OpenCV dependencies to be installed. If you want to use `flash-attn`, you also need to install the `clang` compiler and a [CUDA toolkit](https://developer.nvidia.com/cuda-downloads).
>

> For Ubuntu < 23.10:
> ```bash
> apt update && apt install -y openslide-tools libgl1-mesa-glx # libgl1-mesa-glx is needed for OpenCV
> apt update && apt install -y libgl1-mesa-glx clang
> ```
>
> For Ubuntu >= 23.10:
> ```bash
> apt update && apt install -y openslide-tools libgl1 libglx-mesa0 libglib2.0-0 # libgl1, libglx-mesa0, libglib2.0-0 are needed for OpenCV
> apt update && apt install -y libgl1 libglx-mesa0 libglib2.0-0 clang
> ```


### Installation Troubleshooting

> [!NOTE]
> Installing the GPU version of STAMP will force the compilation of the `flash-attn` package (as well as `mamba-ssm` and `causal_conv1d`). This can take a long time and requires a lot of memory. You can limit the number of parallel compilation jobs by setting the `MAX_JOBS` environment variable before running the installation command, e.g. `MAX_JOBS=4 uv sync --extra build --extra gpu`.


#### Undefined Symbol Error

If you encounter an error similar to the following when importing `flash_attn`, `mamba-ssm`, or `causal_conv1d` on a GPU system, it usually indicates that the torch version in your environment does not match the torch version used to build those packages. This can happen if you previously built them for another environment, or if the torch version changed between running the `--extra build` and `--extra gpu` installation commands.

```
> import flash_attn_2_cuda as flash_attn_gpu
E ImportError: [...]/.venv/lib/python3.12/site-packages/flash_attn_2_cuda.cpython-312-x86_64-linux-gnu.so: undefined symbol: _ZN3c105ErrorC2ENS_14SourceLocationENSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE

.venv/lib/python3.12/site-packages/flash_attn/flash_attn_interface.py:15: ImportError
```

If you encounter this error on a GPU installation, you can fix it by reverting to the environment with only `--extra build`, clearing the uv cache, and then reinstalling the `--extra gpu` packages:

```bash
uv cache clean flash_attn
uv cache clean mamba-ssm
uv cache clean causal_conv1d

# Now it should re-build the packages with the correct torch version

# With uv pip install
uv pip install "git+https://github.com/KatherLab/STAMP.git[build]"
uv pip install "git+https://github.com/KatherLab/STAMP.git[build,gpu]" --no-build-isolation

# With uv sync in the cloned repository
uv sync --extra build
uv sync --extra build --extra gpu
```


## Basic Usage

If the installation was successful, running `stamp` in your terminal should yield the following output:
```
$ stamp
Expand All @@ -68,7 +159,7 @@ options:
Path to config file. Default: config.yaml
```

## Running stamp
## Getting Started Guide

For a quick introduction how to run stamp,
check out our [getting started guide](getting-started.md).
Expand Down
6 changes: 1 addition & 5 deletions getting-started.md
Original file line number Diff line number Diff line change
Expand Up @@ -66,11 +66,7 @@ we will stick with ctranspath for this example.
In order to use a feature extractor,
you also have to install their respective dependencies.
You can do so by specifying the feature extractor you want to use
when installing stamp:
```sh
# Install stamp including the dependencies for all feature extractors
pip install "git+https://github.com/KatherLab/stamp@v2[all]"
```
when installing stamp. Please refer to the [installation instructions](README.md#installation)

Open the `stamp-test-experiment/config.yaml` we created in the last step
and modify the `output_dir`, `wsi_dir` and `cache_dir` entries
Expand Down
108 changes: 93 additions & 15 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@ authors = [
{ name = "Tim Lenz", email = "tim.lenz@tu-dresden.de" },
{ name = "Laura Žigutytė", email = "laura.zigutyte@tu-dresden.de" },
{ name = "Cornelius Kummer", email = "cornelius.kummer@tu-dresden.de" },
{ name = "Juan Pablo Ricapito", email = "juan_pablo.ricapito@tu-dresden.de"}
{ name = "Juan Pablo Ricapito", email = "juan_pablo.ricapito@tu-dresden.de" },
{ name = "Fabian Wolf", email = "fabian.wolf2@tu-dresden.de" }
]
description = "A protocol for Solid Tumor Associative Modeling in Pathology"
readme = "README.md"
Expand All @@ -30,8 +31,8 @@ dependencies = [
"numpy>=2.2.2",
"opencv-python>=4.10.0.84",
"openpyxl>=3.1.5",
"openslide-bin>=4.0.0.6",
"openslide-python>=1.4.1",
"openslide-bin>=4.0.0.8",
"openslide-python>=1.4.2",
"packaging>=24.2",
"pandas>=2.2.3",
"pillow>=11.1.0",
Expand All @@ -44,17 +45,31 @@ dependencies = [
"torchvision>=0.20.1",
"tqdm>=4.66.6",
"timm>=0.9.11",
"transformers"
]

[project.optional-dependencies]
build = [
"setuptools",
"hatchling",
"psutil",
"ninja"
]
flash-attention = [
"stamp[build]",
"flash-attn @ git+https://github.com/KatherLab/flash-attention.git@30e60b16f727af429dcfa12fc0c996f92d92a3bb",
]
conch = [
"huggingface-hub>=0.26.2",
"conch @ git+https://github.com/Mahmoodlab/CONCH.git@02d6ac59cc20874bff0f581de258c2b257f69a84",
"conch @ git+https://github.com/KatherLab/CONCH",
]
conch1_5 = [
conch1_5_cpu = [
"transformers>=4.45.2",
"einops-exts==0.0.4",
]
conch1_5 = [
"stamp[conch1_5_cpu, flash-attention]",
]
ctranspath = [
"gdown>=5.2.0",
]
Expand All @@ -63,36 +78,65 @@ chief_ctranspath = [
"torch>=2.0.0"
]
gigapath = [
"gigapath @ git+https://github.com/EzicStar/prov-gigapath.git@d4cf55321df37aaf867e24a31c61bcf490a296eb"
"stamp[flash-attention]",
"gigapath @ git+https://github.com/KatherLab/prov-gigapath.git@edffc189af3e665bfad48a2a0a3ba81bb5bc5518",
"fvcore",
"iopath",
"monai",
"scikit-image",
"webdataset",
"lifelines",
"scikit-survival",
"fairscale",
"wandb",
]
uni = [
"huggingface-hub>=0.26.2",
"uni @ git+https://github.com/mahmoodlab/UNI.git",
"uni @ git+https://github.com/KatherLab/uni.git@f37c299eb0bffa0e585f120974082cfec6ee6d53",
]
virchow2 = [
"huggingface-hub>=0.27.1",
"torch>=2.0.0",
]
cobra = [
"jinja2>=3.1.4",
"cobra @ git+https://github.com/KatherLab/COBRA.git@f1a576e1133330ffc2d1df6ee110701921c7b7c9",
]
prism = [
"stamp[flash-attention]",
"causal-conv1d @ git+https://github.com/KatherLab/causal-conv1d.git@52ec902314b9eda800162c73502a89f3572fc522",
"mamba-ssm @ git+https://github.com/KatherLab/mamba.git@423692d1dddd558884fc3efb7c063e907d022c74",
"cobra @ git+https://github.com/KatherLab/COBRA.git@468ba1171f6b5488cd4c946438b90998fd8defad",
"jinja2>=3.1.4",
"triton==3.2.0", # Fix triton to 3.2.0 (also makes torch==2.6.0) until this is solved: https://github.com/pytorch/pytorch/issues/153737
]
prism_cpu = [
"sacremoses==0.1.1",
"environs==11.0.0",
]
prism = [
"stamp[prism_cpu, flash-attention]",
]
madeleine = [
"madeleine @ git+https://github.com/mahmoodlab/MADELEINE.git@de7c85acc2bdad352e6df8eee5694f8b6f288012"
]
musk = [
musk_cpu = [
"musk @ git+https://github.com/lilab-stanford/MUSK.git@e1699c27687f44bbf6d4adfcbb2abe89795d347f",
"fairscale",
]
plip = [
musk = [
"stamp[musk_cpu, flash-attention]",
]
plip_cpu = [
"transformers>=4.45.2"
]
plip = [
"stamp[plip_cpu, flash-attention]",
]


# Blanket target
all = ["stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5,prism,madeleine,musk,plip]"]
all = [
"stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5,prism,madeleine,musk,plip,gigapath,cobra]"
]
cpu = ["stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5_cpu,prism_cpu,madeleine,musk_cpu,plip_cpu]"]
gpu = ["stamp[conch,ctranspath,uni,virchow2,chief_ctranspath,conch1_5,prism,madeleine,musk,plip,gigapath,cobra]"]

[project.scripts]
"stamp" = "stamp.__main__:main"
Expand All @@ -107,7 +151,7 @@ dev = [
"ipykernel>=6.29.5",
"pyright>=1.1.389,!=1.1.391",
"pytest>=8.3.4",
"ruff>=0.8.1",
"ruff>=0.12.3",
]

[build-system]
Expand All @@ -126,6 +170,18 @@ markers = [
[tool.ruff]
lint.ignore = ["F722"] # https://docs.kidger.site/jaxtyping/faq/#flake8-or-ruff-are-throwing-an-error

[tool.uv]
no-build-isolation-package = ["flash-attn", "cobra", "mamba-ssm", "causal-conv1d", "gigapath", "flash-attention"]
no-binary-package = ["flash-attn", "gigapath"]
conflicts = [
[
{ extra = "cpu" },
{ extra = "gpu" }
]
]



[[tool.uv.dependency-metadata]]
name = "uni"
version = "v0.1.0"
Expand All @@ -139,4 +195,26 @@ requires-dist = [
"tqdm",
"transformers",
"xformers; sys_platform != 'darwin'" # xformers is not supported on macOS
]

[[tool.uv.dependency-metadata]]
name = "flash-attn"
version = "2.8.1"
requires-dist = [
"torch",
"einops",
]

[[tool.uv.dependency-metadata]]
name = "mamba-ssm"
version = "v2.2.4"
requires-dist = [
"setuptools",
]

[[tool.uv.dependency-metadata]]
name = "causal-conv1d"
version = "v1.5.0.post8"
requires-dist = [
"setuptools",
]
Loading