diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index 171f69f4..a0935c6d 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -26,8 +26,14 @@ jobs:
- name: Install dependencies
run: |
python -m pip install --upgrade pip
- python -m pip install pytest
- if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+ pip install -e .[dev,test]
+ - name: Check style
+ run: |
+ black stlearn tests
+ ruff check stlearn tests
+ - name: Check types
+ run: |
+ mypy stlearn tests
- name: Test with pytest
run: |
- pytest
+ pytest
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index c5ab06d4..fcd9e498 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,21 +1,71 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
*.pyc
-.ipynb_checkpoints
-*/.ipynb_checkpoints/*
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
build/
+docs/api/
+docs/_build/
+docs/generated/
+data/samples
+develop-eggs/
dist/
-*.egg-info
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# Unit tests / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+*/.ipynb_checkpoints/*
/*.ipynb
+
+# Data files
/*.csv
-output/
+
+# MacOS caching
.DS_Store
*/.DS_Store
+
+# PyCharm etc
.idea/
-docs/_build
+
+# Sphinx documentation
+docs.bk/_build
+
+# Distribution/package/temporary files
data/
tiling/
-.pytest_cache
figures/
*.h5ad
-inferCNV/
-stlearn/tools/microenv/cci/junk_code.py
-stlearn/tools/microenv/cci/.Rhistory
diff --git a/.readthedocs.yml b/.readthedocs.yml
index 6a8f1a14..e841d344 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -1,4 +1,25 @@
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Set the OS, Python version, and other tools you might need
build:
- image: latest
+ os: ubuntu-24.04
+ tools:
+ python: "3.10"
+
+# Build documentation in the "docs/" directory with Sphinx
+sphinx:
+ configuration: docs/conf.py
+
+# Optionally, but recommended,
+# declare the Python requirements required to build your documentation
+# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
- version: 3.8
+ install:
+ - method: pip
+ path: .
+ extra_requirements:
+ - dev
\ No newline at end of file
diff --git a/AUTHORS.rst b/AUTHORS.rst
index d30eaa6e..a024f3f5 100644
--- a/AUTHORS.rst
+++ b/AUTHORS.rst
@@ -5,9 +5,12 @@ Credits
Development Lead
----------------
-* Genomics and Machine Learning lab
+* Genomics and Machine Learning Lab
Contributors
------------
-None yet. Why not be the first?
+* Brad Balderson
+* Andrew Newman
+* Duy Pham
+* Xiao Tan
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index aa232892..b9769b45 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -64,11 +64,19 @@ Ready to contribute? Here's how to set up `stlearn` for local development.
$ git clone git@github.com:your_name_here/stlearn.git
-3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
+3. Install your local copy into a virtualenv. This is how you set up your fork for local development::
- $ mkvirtualenv stlearn
+ $ conda create -n stlearn-dev python=3.10 -y
+ $ conda activate stlearn-dev
$ cd stlearn/
- $ python setup.py develop
+ $ pip install -e .[dev,test]
+
+ Or if you prefer pip/virtualenv::
+
+ $ python -m venv stlearn-env
+ $ source stlearn-env/bin/activate # On Windows: stlearn-env\Scripts\activate
+ $ cd stlearn/
+ $ pip install -e .[dev,test]
4. Create a branch for local development::
@@ -76,14 +84,16 @@ Ready to contribute? Here's how to set up `stlearn` for local development.
Now you can make your changes locally.
-5. When you're done making changes, check that your changes pass flake8 and the
- tests, including testing other Python versions with tox::
+5. When you're done making changes, check that your changes pass linters and tests::
- $ flake8 stlearn tests
- $ python setup.py test or pytest
- $ tox
+ $ black stlearn tests
+ $ ruff check stlearn tests
+ $ mypy stlearn tests
+ $ pytest
+
+Or run everything with tox::
- To get flake8 and tox, just pip install them into your virtualenv.
+ $ tox
6. Commit your changes and push your branch to GitHub::
diff --git a/HISTORY.rst b/HISTORY.rst
index 39a6759c..815cb9dd 100644
--- a/HISTORY.rst
+++ b/HISTORY.rst
@@ -2,24 +2,51 @@
History
=======
+1.1.0 (2025-07-02)
+------------------
+* Support Python 3.10.x
+* Added quality checks (black, ruff and mypy) and fixed the source code accordingly.
+* Copy parameters now work with the same semantics as scanpy.
+* Library upgrades for leidenalg, louvain, numba, numpy, scanpy, and tensorflow.
+* datasets.xenium_sge - loads Xenium data (and caches it) similar to scanpy.visium_sge.
+
+API and Bug Fixes:
+* Xenium TIFF and cell positions are now aligned.
+* Consistent with type annotations - mainly missing None annotations.
+* pl.cluster_plot - Does not keep colours from previous runs when clustering.
+* pl.trajectory.pseudotime_plot - Fix typing of cluster values in .uns["split_node"].
+* Removed datasets.example_bcba - Replaced with wrapper for scanpy.visium_sge.
+* Moved spatials directory to spatial, cleaned up pl and tl packages.
+
0.4.11 (2022-11-25)
------------------
+
0.4.10 (2022-11-22)
------------------
+
0.4.8 (2022-06-15)
------------------
+
0.4.7 (2022-03-28)
------------------
+
0.4.6 (2022-03-09)
------------------
+
0.4.5 (2022-03-02)
------------------
+
0.4.0 (2022-02-03)
------------------
+
0.3.2 (2021-03-29)
------------------
+
0.3.1 (2020-12-24)
------------------
+
0.2.7 (2020-09-12)
------------------
+
0.2.6 (2020-08-04)
+------------------
diff --git a/LICENSE b/LICENSE
index 626beb6e..fafffeca 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,8 +1,6 @@
-
-
BSD License
-Copyright (c) 2020, Genomics and Machine Learning lab
+Copyright (c) 2020-2025, Genomics and Machine Learning lab
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
diff --git a/docs/Makefile b/docs/Makefile
index 96688bf3..d4bb2cbb 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -1,10 +1,10 @@
# Minimal makefile for Sphinx documentation
#
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = python -msphinx
-SPHINXPROJ = stlearn
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS ?=
+SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css
new file mode 100644
index 00000000..6beb551f
--- /dev/null
+++ b/docs/_static/css/custom.css
@@ -0,0 +1,5 @@
+/* Custom styling for stLearn documentation */
+
+p img {
+ vertical-align: bottom
+}
diff --git a/docs/_temp/example_cci.py b/docs/_temp/example_cci.py
deleted file mode 100644
index 15fe9a84..00000000
--- a/docs/_temp/example_cci.py
+++ /dev/null
@@ -1,180 +0,0 @@
-# """ Example code for running CCI analysis using new interface/approach.
-
-# Tested: * Within-spot mode
-# * Between-spot mode
-
-# TODO tests: * Above with cell heterogeneity information
-# """
-
-################################################################################
-# Environment setup #
-################################################################################
-import stlearn as st
-import matplotlib.pyplot as plt
-
-################################################################################
-# Load your data #
-################################################################################
-# TODO - load as an AnnData & perform usual pre-processing.
-data = None # replace with your code
-
-# """ # Adding cell heterogeneity information if you have it.
-# st.add.labels(data, 'tutorials/label_transfer_bc.csv', sep='\t')
-# st.pl.cluster_plot(data, use_label="predictions")
-# """
-
-################################################################################
-# Performing cci_rank analysis #
-################################################################################
-# Load the NATMI literature-curated database of LR pairs, data formatted #
-lrs = st.tl.cci.load_lrs(["connectomeDB2020_lit"])
-
-st.tl.cci.run(
- data,
- lrs,
- use_label=None, # Need to add the label transfer results to object first, above code puts into 'label_transfer'
- use_het="cell_het", # Slot for cell het. results in adata.obsm, only if use_label specified
- min_spots=6, # Filter out any LR pairs with no scores for less than 6 spots
- distance=None, # distance=0 for within-spot mode, None to auto-select distance to nearest neighbourhood.
- n_pairs=1000, # Number of random pairs to generate
- adj_method="fdr_bh", # MHT correction method
- min_expr=0, # min expression for gene to be considered expressed.
- pval_adj_cutoff=0.05,
-)
-# """
-# Example output:
-
-# Calculating neighbours...
-# 0 spots with no neighbours, 6 median spot neighbours.
-# Spot neighbour indices stored in adata.uns['spot_neighbours']
-# Altogether 1393 valid L-R pairs
-# Generating random gene pairs...
-# Generating the background...
-# Calculating p-values for each LR pair in each spot...: 100%|██████████ [ time left: 00:00 ]
-
-# Storing results:
-
-# lr_scores stored in adata.obsm['lr_scores'].
-# p_vals stored in adata.obsm['p_vals'].
-# p_adjs stored in adata.obsm['p_adjs'].
-# -log10(p_adjs) stored in adata.obsm['-log10(p_adjs)'].
-# lr_sig_scores stored in adata.obsm['lr_sig_scores'].
-
-# Per-spot results in adata.obsm have columns in same order as rows in adata.uns['lr_summary'].
-# Summary of LR results in adata.uns['lr_summary'].
-# """
-
-################################################################################
-# Visualising results #
-################################################################################
-# Plotting the -log10(p_adjs) for the lr with the highest number of spots.
-# Set use_lr to any listed in data.uns['lr_summary'] to visualise alternate lrs.
-st.pl.lr_result_plot(
- data,
- use_lr=None, # Which LR to use, if None then uses top resuls from data.uns['lr_results']
- use_result="-log10(p_adjs)", # Which result to visualise, must be one of
- # p_vals, p_adjs, -log10(p_adjs), lr_sig_scores
-)
-plt.show()
-
-################################################################################
-# Extra diagnostic plots for results #
-################################################################################
-# TODO:
-# Below needs to be updated with new way of storing results.
-
-# Looking at which LR pairs were significant across the most spots #
-print(data.uns["lr_summary"]) # Rank-ordered by pairs with most significant spots
-
-# Now looking at the LR pair with the highest number of sig. spots #
-best_lr = data.uns["lr_summary"].index.values[0]
-
-# Binary LR coexpression plot for all spots #
-st.pl.lr_plot(
- data,
- best_lr,
- inner_size_prop=0.1,
- outer_mode="binary",
- pt_scale=10,
- use_label=None,
- show_image=True,
- sig_spots=False,
-)
-plt.show()
-
-# Significance scores for all spots #
-st.pl.lr_plot(
- data,
- best_lr,
- inner_size_prop=1,
- outer_mode=None,
- pt_scale=20,
- use_label="lr_scores",
- show_image=True,
- sig_spots=False,
-)
-plt.show()
-
-# Binary LR coexpression plot for significant spots #
-st.pl.lr_plot(
- data,
- best_lr,
- outter_size_prop=1,
- outer_mode="binary",
- pt_scale=20,
- use_label=None,
- show_image=True,
- sig_spots=True,
-)
-plt.show()
-
-# Continuous LR coexpression for signficant spots #
-st.pl.lr_plot(
- data,
- best_lr,
- inner_size_prop=0.1,
- middle_size_prop=0.2,
- outter_size_prop=0.4,
- outer_mode="continuous",
- pt_scale=150,
- use_label=None,
- show_image=True,
- sig_spots=True,
-)
-plt.show()
-
-# Continous LR coexpression for significant spots with tissue_type information #
-st.pl.lr_plot(
- data,
- best_lr,
- inner_size_prop=0.08,
- middle_size_prop=0.3,
- outter_size_prop=0.5,
- outer_mode="continuous",
- pt_scale=150,
- use_label="tissue_type",
- show_image=True,
- sig_spots=True,
-)
-plt.show()
-
-
-# # Old version of visualisation #
-# """
-# # LR enrichment scores
-# data.obsm[f'{best_lr}_scores'] = data.uns['per_lr_results'][best_lr].loc[:,
-# 'lr_scores'].values
-# # -log10(p_adj) of LR enrichment scores
-# data.obsm[f'{best_lr}_log-p_adj'] = data.uns['per_lr_results'][best_lr].loc[:,
-# '-log10(p_adj)'].values
-# # Significant LR enrichment scores
-# data.obsm[f'{best_lr}_sig-scores'] = data.uns['per_lr_results'][best_lr].loc[:,
-# 'lr_sig_scores'].values
-
-# # Visualising these results #
-# st.pl.het_plot(data, use_het=f'{best_lr}_scores', cell_alpha=0.7)
-# plt.show()
-
-# st.pl.het_plot(data, use_het=f'{best_lr}_sig-scores', cell_alpha=0.7)
-# plt.show()
-# """
diff --git a/docs/_templates/autosummary/base.rst b/docs/_templates/autosummary/base.rst
deleted file mode 100644
index 7a780868..00000000
--- a/docs/_templates/autosummary/base.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-
-{% extends "!autosummary/base.rst" %}
-
-.. http://www.sphinx-doc.org/en/stable/ext/autosummary.html#customizing-templates
diff --git a/docs/_templates/autosummary/class.rst b/docs/_templates/autosummary/class.rst
deleted file mode 100644
index 42c37f16..00000000
--- a/docs/_templates/autosummary/class.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-{{ fullname | escape | underline}}
-
-.. currentmodule:: {{ module }}
-
-.. add toctree option to make autodoc generate the pages
diff --git a/docs/api.rst b/docs/api.rst
index 19568d0a..c27132ff 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -13,11 +13,10 @@ Import stLearn as::
Wrapper functions: `wrapper`
------------------------------
-.. module:: stlearn.wrapper
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
Read10X
ReadOldST
@@ -31,11 +30,10 @@ Wrapper functions: `wrapper`
Add: `add`
-------------------
-.. module:: stlearn.add
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
add.image
add.positions
@@ -56,7 +54,7 @@ Preprocessing: `pp`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
pp.filter_genes
pp.log1p
@@ -75,7 +73,7 @@ Embedding: `em`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
em.run_pca
em.run_umap
@@ -91,7 +89,7 @@ Spatial: `spatial`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
spatial.clustering.localization
@@ -99,7 +97,7 @@ Spatial: `spatial`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
spatial.trajectory.pseudotime
spatial.trajectory.pseudotimespace_global
@@ -113,7 +111,7 @@ Spatial: `spatial`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
spatial.morphology.adjust
@@ -121,7 +119,7 @@ Spatial: `spatial`
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
spatial.SME.SME_impute0
spatial.SME.pseudo_spot
@@ -130,22 +128,13 @@ Spatial: `spatial`
Tools: `tl`
-------------------
-.. module:: stlearn.tl.clustering
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
tl.clustering.kmeans
tl.clustering.louvain
-
-
-.. module:: stlearn.tl.cci
-.. currentmodule:: stlearn
-
-.. autosummary::
- :toctree: .
-
tl.cci.load_lrs
tl.cci.grid
tl.cci.run
@@ -156,11 +145,10 @@ Tools: `tl`
Plot: `pl`
-------------------
-.. module:: stlearn.pl
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
pl.QC_plot
pl.gene_plot
@@ -168,7 +156,6 @@ Plot: `pl`
pl.cluster_plot
pl.cluster_plot_interactive
pl.subcluster_plot
- pl.subcluster_plot
pl.non_spatial_plot
pl.deconvolution_plot
pl.plot_mask
@@ -186,11 +173,10 @@ Plot: `pl`
pl.lr_plot_interactive
pl.spatialcci_plot_interactive
-.. module:: stlearn.pl.trajectory
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
pl.trajectory.pseudotime_plot
pl.trajectory.local_plot
@@ -198,13 +184,13 @@ Plot: `pl`
pl.trajectory.transition_markers_plot
pl.trajectory.DE_transition_plot
-Tools: `datasets`
--------------------
+Datasets: `datasets`
+---------------------------
-.. module:: stlearn.datasets
.. currentmodule:: stlearn
.. autosummary::
- :toctree: .
+ :toctree: api/
- datasets.example_bcba()
+ datasets.visium_sge
+ datasets.xenium_sge
diff --git a/docs/conf.py b/docs/conf.py
index 272a059b..ccda1308 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,216 +1,78 @@
-#!/usr/bin/env python
-#
-# stlearn documentation build configuration file, created by
-# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another
-# directory, add these directories to sys.path here. If the directory is
-# relative to the documentation root, use os.path.abspath to make it
-# absolute, like shown here.
-#
import os
import sys
+import re
+import requests
+
+def download_gdrive_file(file_id, filename):
+ session = requests.Session()
+ url = f"https://docs.google.com/uc?export=download&id={file_id}"
+ response = session.get(url)
+
+ form_action_match = re.search(r'action="([^"]+)"', response.text)
+ if not form_action_match:
+ raise Exception("Could not find form action URL")
+ download_url = form_action_match.group(1)
+
+ params = {}
+ hidden_inputs = re.findall(
+ r':5000` in your web browser.
-
-Check the detail tutorial in this pdf file: `Link `_
diff --git a/docs/list_tutorial.txt b/docs/list_tutorial.txt
deleted file mode 100644
index 53badb09..00000000
--- a/docs/list_tutorial.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Pseudo-time-space-tutorial.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Read_MERFISH.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Read_seqfish.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Read_slideseq.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/ST_deconvolution_visualization.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Working-with-Old-Spatial-Transcriptomics-data.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/stLearn-CCI.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/stSME_clustering.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/stSME_comparison.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Xenium_PSTS.ipynb
-https://raw.githubusercontent.com/BiomedicalMachineLearning/stLearn/master/tutorials/Xenium_CCI.ipynb
diff --git a/docs/make.bat b/docs/make.bat
index 2afd47f0..954237b9 100644
--- a/docs/make.bat
+++ b/docs/make.bat
@@ -5,32 +5,31 @@ pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
- set SPHINXBUILD=python -msphinx
+ set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=.
set BUILDDIR=_build
-set SPHINXPROJ=stlearn
-
-if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
- echo.The Sphinx module was not found. Make sure you have Sphinx installed,
- echo.then set the SPHINXBUILD environment variable to point to the full
- echo.path of the 'sphinx-build' executable. Alternatively you may add the
- echo.Sphinx directory to PATH.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
- echo.http://sphinx-doc.org/
+ echo.https://www.sphinx-doc.org/
exit /b 1
)
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+if "%1" == "" goto help
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
goto end
:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
:end
popd
diff --git a/docs/release_notes/0.3.2.rst b/docs/release_notes/0.3.2.rst
index 9b141ff5..d8e459c7 100644
--- a/docs/release_notes/0.3.2.rst
+++ b/docs/release_notes/0.3.2.rst
@@ -1,7 +1,7 @@
0.3.2 `2021-03-29`
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. rubric:: Feature
+.. rubric:: Features
- Add interactive plotting functions: :func:`~stlearn.pl.gene_plot_interactive`, :func:`~stlearn.pl.cluster_plot_interactive`, :func:`~stlearn.pl.het_plot_interactive`
- Add basic unittest (will add more in the future).
diff --git a/docs/release_notes/0.4.6.rst b/docs/release_notes/0.4.6.rst
index b2f08dd6..b8ee0324 100644
--- a/docs/release_notes/0.4.6.rst
+++ b/docs/release_notes/0.4.6.rst
@@ -1,7 +1,7 @@
0.4.0 `2022-02-03`
~~~~~~~~~~~~~~~~~~~~~~~~~
-.. rubric:: Feature
+.. rubric:: Features
- Upgrade stSME, PSTS and CCI analysis methods.
diff --git a/docs/release_notes/1.1.0.rst b/docs/release_notes/1.1.0.rst
new file mode 100644
index 00000000..bd32dc46
--- /dev/null
+++ b/docs/release_notes/1.1.0.rst
@@ -0,0 +1,19 @@
+1.1.0 `2025-07-02`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. rubric:: Features
+
+* Support Python 3.10.x
+* Added quality checks (black, ruff and mypy) and fixed the source code accordingly.
+* Copy parameters now work with the same semantics as scanpy.
+* Library upgrades for leidenalg, louvain, numba, numpy, scanpy, and tensorflow.
+* datasets.xenium_sge - loads Xenium data (and caches it) similar to scanpy.visium_sge.
+
+.. rubric:: Bug fixes
+
+* Xenium TIFF and cell positions are now aligned.
+* Consistent with type annotations - mainly missing None annotations.
+* pl.cluster_plot - Does not keep colours from previous runs when clustering.
+* pl.trajectory.pseudotime_plot - Fix typing of cluster values in .uns["split_node"].
+* Removed datasets.example_bcba - Replaced with wrapper for scanpy.visium_sge.
+* Moved spatials directory to spatial, cleaned up pl and tl packages.
\ No newline at end of file
diff --git a/docs/release_notes/index.rst b/docs/release_notes/index.rst
index 48c9c3be..6194c62c 100644
--- a/docs/release_notes/index.rst
+++ b/docs/release_notes/index.rst
@@ -1,10 +1,7 @@
-Release notes
+Release Notes
===================================================
-Version 0.4.9
----------------------------
-
-.. include:: 0.4.10.rst
+.. include:: 1.1.0.rst
.. include:: 0.4.6.rst
diff --git a/docs/requirements.txt b/docs/requirements.txt
deleted file mode 100644
index a2c20d28..00000000
--- a/docs/requirements.txt
+++ /dev/null
@@ -1,15 +0,0 @@
--r ../requirements.txt
-ipyvolume
-ipywebrtc
-ipywidgets
-jupyter_sphinx
-nbclean
-nbformat
-nbsphinx
-pygments
-recommonmark
-sphinx
-sphinx-autodoc-typehints
-sphinx_gallery==0.10.1
-sphinx_rtd_theme
-typing_extensions
diff --git a/docs/tutorials.rst b/docs/tutorials.rst
index 0c0ecf16..83889f22 100644
--- a/docs/tutorials.rst
+++ b/docs/tutorials.rst
@@ -4,33 +4,24 @@ Tutorials
.. nbgallery::
:caption: Main features:
- tutorials/stSME_clustering
- tutorials/stSME_comparison
- tutorials/Pseudo-time-space-tutorial
- tutorials/stLearn-CCI
- tutorials/Xenium_PSTS
- tutorials/Xenium_CCI
+ tutorials/cell_cell_interaction
+ tutorials/cell_cell_interaction_xenium
+ tutorials/pseudotime_space
+ tutorials/pseudotime_space_xenium
+ tutorials/stsme_clustering
+ tutorials/stsme_comparison
+
.. nbgallery::
:caption: Visualisation and additional functionalities:
- tutorials/Interactive_plot
- tutorials/Core_plots
- tutorials/ST_deconvolution_visualization
- tutorials/Integration_multiple_datasets
-
+ tutorials/core_plots
+ tutorials/integrate_multiple_datasets
.. nbgallery::
:caption: Supporting platforms:
-
- tutorials/Read_MERFISH
- tutorials/Read_seqfish
- tutorials/Working-with-Old-Spatial-Transcriptomics-data
- tutorials/Read_slideseq
-
.. nbgallery::
:caption: Integration with other spatial tools:
- tutorials/Read_any_data
- tutorials/Working_with_scanpy
+ tutorials/working_with_scanpy
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 00000000..5d8b6f99
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,4 @@
+[mypy]
+follow_untyped_imports = True
+no_site_packages = True
+ignore_missing_imports = True
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..d6e72578
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,86 @@
+[build-system]
+requires = ["setuptools>=61.0", "wheel"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "stlearn"
+version = "1.1.0"
+authors = [
+ {name = "Genomics and Machine Learning lab", email = "andrew.newman@uq.edu.au"},
+]
+description = "A downstream analysis toolkit for Spatial Transcriptomic data"
+readme = {file = "README.md", content-type = "text/markdown"}
+license = {text = "BSD license"}
+requires-python = "~=3.10.0"
+keywords = ["stlearn"]
+classifiers = [
+ "Development Status :: 2 - Pre-Alpha",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: BSD License",
+ "Natural Language :: English",
+ "Programming Language :: Python :: 3.10",
+]
+dynamic = ["dependencies"]
+
+[project.optional-dependencies]
+dev = [
+ "black>=23.0",
+ "ruff>=0.1.0",
+ "mypy>=1.16",
+ "pytest>=7.0",
+ "tox>=4.0",
+ "ghp-import>=2.1.0",
+ "sphinx>=4.0",
+ "furo==2024.8.6",
+ "myst-parser>=0.18",
+ "nbsphinx>=0.9.0",
+ "sphinx-autodoc-typehints>=1.24.0",
+ "sphinx-autosummary-accessors>=2023.4.0",
+]
+test = [
+ "pytest",
+ "pytest-cov",
+]
+webapp = [
+ "flask>=2.0.0",
+ "flask-wtf>=1.0.0",
+ "wtforms>=3.0.0",
+ "markupsafe>2.1.0",
+]
+jupyter = [
+ "jupyter>=1.0.0",
+ "jupyterlab>=3.0.0",
+ "ipywidgets>=7.6.0",
+ "plotly>=5.0.0",
+ "bokeh>=2.4.0",
+ "rpy2>=3.4.0",
+]
+
+[project.urls]
+Homepage = "https://github.com/BiomedicalMachineLearning/stLearn"
+Repository = "https://github.com/BiomedicalMachineLearning/stLearn"
+
+[project.scripts]
+stlearn = "stlearn.app.cli:main"
+
+[tool.setuptools.packages.find]
+include = ["stlearn", "stlearn.*"]
+
+[tool.setuptools.package-data]
+"*" = ["*"]
+
+[tool.setuptools.dynamic]
+dependencies = {file = ["requirements.txt"]}
+
+[tool.ruff]
+line-length=88
+target-version = "py310"
+
+[tool.ruff.lint]
+select = ["E", "F", "W", "I", "N", "UP"]
+ignore = ["E722", "F811", "N802", "N803", "N806", "N818", "N999", "UP031"]
+exclude = [".git", "__pycache__", "build", "dist"]
+
+[tool.ruff.format]
+quote-style = "double"
+indent-style = "space"
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index c5452f88..4c11dc46 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,14 @@
-bokeh>= 2.4.2
-click>=8.0.4
-leidenalg
-louvain
-numba<=0.57.1
-numpy>=1.18,<1.22
-Pillow>=9.0.1
-scanpy>=1.8.2
-scikit-image>=0.19.2
-tensorflow
+bokeh==3.7.3
+click==8.2.1
+leidenalg==0.10.2
+louvain==0.8.2
+numba==0.58.1
+numpy==1.26.4
+pillow==11.2.1
+scanpy==1.10.4
+scikit-image==0.22.0
+tensorflow==2.14.1
+keras==2.14.0
+types-tensorflow>=2.8.0
+imageio==2.37.0
+scipy==1.11.4
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index f877626d..00000000
--- a/setup.cfg
+++ /dev/null
@@ -1,21 +0,0 @@
-[bumpversion]
-current_version = 0.4.11
-commit = True
-tag = True
-
-[bumpversion:file:setup.py]
-search = version='{current_version}'
-replace = version='{new_version}'
-
-[bumpversion:file:stlearn/__init__.py]
-search = __version__ = '{current_version}'
-replace = __version__ = '{new_version}'
-
-[bdist_wheel]
-universal = 1
-
-[flake8]
-exclude = docs
-
-[aliases]
-# Define setup.py command aliases here
diff --git a/setup.py b/setup.py
deleted file mode 100644
index e728fba4..00000000
--- a/setup.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python
-
-"""The setup script."""
-
-from setuptools import setup, find_packages
-
-with open("README.md", encoding="utf8") as readme_file:
- readme = readme_file.read()
-
-with open("HISTORY.rst") as history_file:
- history = history_file.read()
-
-with open("requirements.txt") as f:
- requirements = f.read().splitlines()
-
-
-setup_requirements = []
-
-test_requirements = []
-
-setup(
- author="Genomics and Machine Learning lab",
- author_email="duy.pham@uq.edu.au",
- python_requires=">=3.7",
- classifiers=[
- "Development Status :: 2 - Pre-Alpha",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: BSD License",
- "Natural Language :: English",
- "Programming Language :: Python :: 3",
- "Programming Language :: Python :: 3.7",
- "Programming Language :: Python :: 3.8",
- ],
- description="A downstream analysis toolkit for Spatial Transcriptomic data",
- entry_points={
- "console_scripts": [
- "stlearn=stlearn.app.cli:main",
- ],
- },
- install_requires=requirements,
- license="BSD license",
- long_description=readme + "\n\n" + history,
- long_description_content_type="text/markdown",
- include_package_data=True,
- keywords="stlearn",
- name="stlearn",
- packages=find_packages(include=["stlearn", "stlearn.*"]),
- setup_requires=setup_requirements,
- test_suite="tests",
- tests_require=test_requirements,
- url="https://github.com/BiomedicalMachineLearning/stLearn",
- version="0.4.11",
- zip_safe=False,
-)
diff --git a/stlearn/__init__.py b/stlearn/__init__.py
index 1fc79b20..213fe82f 100644
--- a/stlearn/__init__.py
+++ b/stlearn/__init__.py
@@ -1,30 +1,43 @@
"""Top-level package for stLearn."""
-__author__ = """Genomics and Machine Learning lab"""
-__email__ = "duy.pham@uq.edu.au"
-__version__ = "0.4.11"
-
-
-from . import add
-from . import pp
-from . import em
-from . import tl
-from . import pl
-from . import spatial
-from . import datasets
-
-# Wrapper
-
-from .wrapper.read import ReadSlideSeq
-from .wrapper.read import Read10X
-from .wrapper.read import ReadOldST
-from .wrapper.read import ReadMERFISH
-from .wrapper.read import ReadSeqFish
-from .wrapper.read import ReadXenium
-from .wrapper.read import create_stlearn
+__author__ = """Genomics and Machine Learning Lab"""
+__email__ = "andrew.newman@uq.edu.au"
+__version__ = "1.1.0"
+from . import add, datasets, em, pl, pp, spatial, tl, types
from ._settings import settings
-from .wrapper.convert_scanpy import convert_scanpy
from .wrapper.concatenate_spatial_adata import concatenate_spatial_adata
+from .wrapper.convert_scanpy import convert_scanpy
+
+# Wrapper
+from .wrapper.read import (
+ Read10X,
+ ReadMERFISH,
+ ReadOldST,
+ ReadSeqFish,
+ ReadSlideSeq,
+ ReadXenium,
+ create_stlearn,
+)
# from . import cli
+__all__ = [
+ "add",
+ "pp",
+ "em",
+ "tl",
+ "pl",
+ "spatial",
+ "datasets",
+ "ReadSlideSeq",
+ "Read10X",
+ "ReadOldST",
+ "ReadMERFISH",
+ "ReadSeqFish",
+ "ReadXenium",
+ "create_stlearn",
+ "settings",
+ "types",
+ "convert_scanpy",
+ "concatenate_spatial_adata",
+]
diff --git a/stlearn/__main__.py b/stlearn/__main__.py
index 981709a2..43559dfc 100644
--- a/stlearn/__main__.py
+++ b/stlearn/__main__.py
@@ -2,9 +2,7 @@
"""Package entry point."""
-
-from stlearn.app import main
-
+from stlearn.app import cli
if __name__ == "__main__": # pragma: no cover
- main()
+ cli.main()
diff --git a/stlearn/_compat.py b/stlearn/_compat.py
deleted file mode 100644
index 0ef291a2..00000000
--- a/stlearn/_compat.py
+++ /dev/null
@@ -1,15 +0,0 @@
-try:
- from typing import Literal
-except ImportError:
- try:
- from typing_extensions import Literal
- except ImportError:
-
- class LiteralMeta(type):
- def __getitem__(cls, values):
- if not isinstance(values, tuple):
- values = (values,)
- return type("Literal_", (Literal,), dict(__args__=values))
-
- class Literal(metaclass=LiteralMeta):
- pass
diff --git a/stlearn/_datasets/_datasets.py b/stlearn/_datasets/_datasets.py
index 19ffb6d5..56f17fd5 100644
--- a/stlearn/_datasets/_datasets.py
+++ b/stlearn/_datasets/_datasets.py
@@ -1,18 +1,89 @@
+import zipfile as zf
+
import scanpy as sc
-from .._settings import settings
-from pathlib import Path
from anndata import AnnData
+from .._settings import settings
+
+
+# TODO - Add scanpy and convert this over.
+def visium_sge(
+ sample_id="V1_Breast_Cancer_Block_A_Section_1",
+ *,
+ include_hires_tiff: bool = False,
+) -> AnnData:
+ """Processed Visium Spatial Gene Expression data from 10x Genomics’ database.
-def example_bcba() -> AnnData:
- """\
- Download processed BCBA data (10X genomics published data).
- Reference: https://support.10xgenomics.com/spatial-gene-expression/datasets/1.1.0/V1_Breast_Cancer_Block_A_Section_1
+ The database_ can be browsed online to find the ``sample_id`` you want.
+
+ .. _database: https://support.10xgenomics.com/spatial-gene-expression/datasets
+
+ Parameters
+ ----------
+ sample_id
+ The ID of the data sample in 10x’s spatial database.
+ include_hires_tiff
+ Download and include the high-resolution tissue image (tiff) in
+ `adata.uns["spatial"][sample_id]["metadata"]["source_image_path"]`.
+
+ Returns
+ -------
+ Annotated data matrix.
"""
- settings.datasetdir.mkdir(exist_ok=True)
- filename = settings.datasetdir / "example_bcba.h5"
- url = "https://www.dropbox.com/s/u3m2f16mvdom1am/example_bcba.h5ad?dl=1"
- if not filename.is_file():
- sc.readwrite._download(url=url, path=filename)
- adata = sc.read_h5ad(filename)
- return adata
+ sc.settings.datasetdir = settings.datasetdir
+ return sc.datasets.visium_sge(sample_id, include_hires_tiff=include_hires_tiff)
+
+
+def xenium_sge(
+ base_url="https://cf.10xgenomics.com/samples/xenium/1.0.1",
+ image_filename="he_image.ome.tif",
+ alignment_filename="he_imagealignment.csv",
+ zip_filename="outs.zip",
+ library_id="Xenium_FFPE_Human_Breast_Cancer_Rep1",
+ include_hires_tiff: bool = False,
+):
+ """
+    Download and extract Xenium SGE data files. Unlike scanpy, this currently does not
+ load the data. Data is located in `settings.datasetdir` / `library_id`.
+
+ Args:
+ base_url: Base URL for downloads
+ image_filename: Name of the image file to download
+ alignment_filename: Name of the affine transformation file to download
+ zip_filename: Name of the zip file to download
+ library_id: Identifier for the library
+ include_hires_tiff: Whether to download the high-res TIFF image
+ """
+ sc.settings.datasetdir = settings.datasetdir
+ library_dir = settings.datasetdir / library_id
+ library_dir.mkdir(parents=True, exist_ok=True)
+
+ files_to_extract = ["cell_feature_matrix.h5", "cells.csv.gz", "experiment.xenium"]
+ all_sge_files_exist = all(
+ (library_dir / sge_file).exists() for sge_file in files_to_extract
+ )
+
+ download_filenames = []
+ if not all_sge_files_exist:
+ download_filenames.append(zip_filename)
+ if include_hires_tiff and (
+ not (library_dir / alignment_filename).exists()
+ or not (library_dir / image_filename).exists()
+ ):
+ download_filenames += [alignment_filename, image_filename]
+
+ for file_name in download_filenames:
+ file_path = library_dir / file_name
+ url = f"{base_url}/{library_id}/{library_id}_{file_name}"
+ if not file_path.is_file():
+ sc.readwrite._download(url=url, path=file_path)
+
+ if not all_sge_files_exist:
+ try:
+ zip_file_path = library_dir / zip_filename
+ with zf.ZipFile(zip_file_path, "r") as zip_ref:
+ for zip_filename in files_to_extract:
+ with open(library_dir / zip_filename, "wb") as file_name:
+ file_name.write(zip_ref.read(f"outs/{zip_filename}"))
+ except zf.BadZipFile:
+ raise ValueError(f"Invalid zip file: {library_dir / zip_filename}")
diff --git a/stlearn/_settings.py b/stlearn/_settings.py
index 30eb017a..9e75a8d4 100644
--- a/stlearn/_settings.py
+++ b/stlearn/_settings.py
@@ -1,21 +1,20 @@
import inspect
import sys
+from collections.abc import Iterable, Iterator
from contextlib import contextmanager
from enum import IntEnum
+from logging import getLevelName
from pathlib import Path
from time import time
-from logging import getLevelName
-from typing import Any, Union, Optional, Iterable, TextIO
-from typing import Tuple, List, ContextManager
+from typing import Any, Literal, TextIO
from . import logging
-from .logging import _set_log_level, _set_log_file, _RootLogger
-from ._compat import Literal
+from .logging import _RootLogger, _set_log_file, _set_log_level
# All the code here migrated from scanpy
# It help to work with scanpy package
-_VERBOSITY_TO_LOGLEVEL = {
+_VERBOSITY_TO_LOGLEVEL: dict[str | int, str] = {
"error": "ERROR",
"warning": "WARNING",
"info": "INFO",
@@ -40,7 +39,7 @@ def level(self) -> int:
return getLevelName(_VERBOSITY_TO_LOGLEVEL[self])
@contextmanager
- def override(self, verbosity: "Verbosity") -> ContextManager["Verbosity"]:
+ def override(self, verbosity: "Verbosity") -> Iterator["Verbosity"]:
"""\
Temporarily override verbosity
"""
@@ -49,7 +48,7 @@ def override(self, verbosity: "Verbosity") -> ContextManager["Verbosity"]:
settings.verbosity = self
-def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
+def _type_check(var: Any, varname: str, types: type | tuple[type, ...]):
if isinstance(var, types):
return
if isinstance(types, type):
@@ -62,11 +61,15 @@ def _type_check(var: Any, varname: str, types: Union[type, Tuple[type, ...]]):
raise TypeError(f"{varname} must be of type {possible_types_str}")
-class stLearnConfig:
+class stLearnConfig: # noqa N801
"""\
Config manager for scanpy.
"""
+ _logpath: Path | None
+ _logfile: TextIO
+ _verbosity: Verbosity
+
def __init__(
self,
*,
@@ -76,14 +79,14 @@ def __init__(
file_format_figs: str = "pdf",
autosave: bool = False,
autoshow: bool = True,
- writedir: Union[str, Path] = "./write/",
- cachedir: Union[str, Path] = "./cache/",
- datasetdir: Union[str, Path] = "./data/",
- figdir: Union[str, Path] = "./figures/",
- cache_compression: Union[str, None] = "lzf",
+ writedir: str | Path = "./write/",
+ cachedir: str | Path = "./cache/",
+ datasetdir: str | Path = "./data/",
+ figdir: str | Path = "./figures/",
+ cache_compression: str | None = "lzf",
max_memory=15,
n_jobs=1,
- logfile: Union[str, Path, None] = None,
+ logfile: str | Path | None = None,
categories_to_ignore: Iterable[str] = ("N/A", "dontknow", "no_gate", "?"),
_frameon: bool = True,
_vector_friendly: bool = False,
@@ -139,14 +142,14 @@ def verbosity(self) -> Verbosity:
return self._verbosity
@verbosity.setter
- def verbosity(self, verbosity: Union[Verbosity, int, str]):
+ def verbosity(self, verbosity: Verbosity | int | str):
verbosity_str_options = [
v for v in _VERBOSITY_TO_LOGLEVEL if isinstance(v, str)
]
if isinstance(verbosity, Verbosity):
- self._verbosity = verbosity
+ new_verbosity = verbosity
elif isinstance(verbosity, int):
- self._verbosity = Verbosity(verbosity)
+ new_verbosity = Verbosity(verbosity)
elif isinstance(verbosity, str):
verbosity = verbosity.lower()
if verbosity not in verbosity_str_options:
@@ -155,10 +158,9 @@ def verbosity(self, verbosity: Union[Verbosity, int, str]):
f"Accepted string values are: {verbosity_str_options}"
)
else:
- self._verbosity = Verbosity(verbosity_str_options.index(verbosity))
- else:
- _type_check(verbosity, "verbosity", (str, int))
- _set_log_level(self, _VERBOSITY_TO_LOGLEVEL[self._verbosity])
+ new_verbosity = Verbosity(verbosity_str_options.index(verbosity))
+ self._verbosity = new_verbosity
+ _set_log_level(self, self._verbosity)
@property
def plot_suffix(self) -> str:
@@ -207,7 +209,8 @@ def file_format_figs(self, figure_format: str):
@property
def autosave(self) -> bool:
"""\
- Automatically save figures in :attr:`~stlearn._settings.stLearnConfig.figdir` (default `False`).
+ Automatically save figures in :attr:`~stlearn._settings.stLearnConfig.figdir`
+ (default `False`).
Do not show plots/figures interactively.
"""
@@ -240,7 +243,7 @@ def writedir(self) -> Path:
return self._writedir
@writedir.setter
- def writedir(self, writedir: Union[str, Path]):
+ def writedir(self, writedir: str | Path):
_type_check(writedir, "writedir", (str, Path))
self._writedir = Path(writedir)
@@ -252,7 +255,7 @@ def cachedir(self) -> Path:
return self._cachedir
@cachedir.setter
- def cachedir(self, cachedir: Union[str, Path]):
+ def cachedir(self, cachedir: str | Path):
_type_check(cachedir, "cachedir", (str, Path))
self._cachedir = Path(cachedir)
@@ -264,7 +267,7 @@ def datasetdir(self) -> Path:
return self._datasetdir
@datasetdir.setter
- def datasetdir(self, datasetdir: Union[str, Path]):
+ def datasetdir(self, datasetdir: str | Path):
_type_check(datasetdir, "datasetdir", (str, Path))
self._datasetdir = Path(datasetdir).resolve()
@@ -276,12 +279,12 @@ def figdir(self) -> Path:
return self._figdir
@figdir.setter
- def figdir(self, figdir: Union[str, Path]):
+ def figdir(self, figdir: str | Path):
_type_check(figdir, "figdir", (str, Path))
self._figdir = Path(figdir)
@property
- def cache_compression(self) -> Optional[str]:
+ def cache_compression(self) -> str | None:
"""\
Compression for `sc.read(..., cache=True)` (default `'lzf'`).
@@ -290,7 +293,7 @@ def cache_compression(self) -> Optional[str]:
return self._cache_compression
@cache_compression.setter
- def cache_compression(self, cache_compression: Optional[str]):
+ def cache_compression(self, cache_compression: str | None):
if cache_compression not in {"lzf", "gzip", None}:
raise ValueError(
f"`cache_compression` ({cache_compression}) "
@@ -299,7 +302,7 @@ def cache_compression(self, cache_compression: Optional[str]):
self._cache_compression = cache_compression
@property
- def max_memory(self) -> Union[int, float]:
+ def max_memory(self) -> int | float:
"""\
Maximal memory usage in Gigabyte.
@@ -308,7 +311,7 @@ def max_memory(self) -> Union[int, float]:
return self._max_memory
@max_memory.setter
- def max_memory(self, max_memory: Union[int, float]):
+ def max_memory(self, max_memory: int | float):
_type_check(max_memory, "max_memory", (int, float))
self._max_memory = max_memory
@@ -325,18 +328,21 @@ def n_jobs(self, n_jobs: int):
self._n_jobs = n_jobs
@property
- def logpath(self) -> Optional[Path]:
+ def logpath(self) -> Path | None:
"""\
The file path `logfile` was set to.
"""
return self._logpath
@logpath.setter
- def logpath(self, logpath: Union[str, Path, None]):
- _type_check(logpath, "logfile", (str, Path))
- # set via “file object” branch of logfile.setter
- self.logfile = Path(logpath).open("a")
- self._logpath = Path(logpath)
+ def logpath(self, logpath: str | Path | None):
+ if logpath is None:
+ self._logpath = None
+ else:
+ _type_check(logpath, "logpath", (str, Path))
+ # set via “file object” branch of logfile.setter
+ self.logfile = Path(logpath).open("a")
+ self._logpath = Path(logpath)
@property
def logfile(self) -> TextIO:
@@ -347,23 +353,27 @@ def logfile(self) -> TextIO:
The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks
and to :obj:`sys.stderr` otherwise.
- For backwards compatibility, setting it to `''` behaves like setting it to `None`.
+ For backwards compatibility, setting it to `''` behaves like setting it
+ to `None`.
"""
return self._logfile
@logfile.setter
- def logfile(self, logfile: Union[str, Path, TextIO, None]):
- if not hasattr(logfile, "write") and logfile:
- self.logpath = logfile
- else: # file object
- if not logfile: # None or ''
- logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
+ def logfile(self, logfile: str | Path | TextIO | None):
+ if logfile is None or logfile == "":
+ self._logfile = sys.stdout if self._is_run_from_ipython() else sys.stderr
+ self._logpath = None
+ elif isinstance(logfile, (str | Path)):
+ path = Path(logfile)
+ self._logfile = path.open("a")
+ self._logpath = path
+ elif isinstance(logfile, TextIO):
self._logfile = logfile
self._logpath = None
- _set_log_file(self)
+ _set_log_file(self)
@property
- def categories_to_ignore(self) -> List[str]:
+ def categories_to_ignore(self) -> list[str]:
"""\
Categories that are omitted in plotting etc.
"""
@@ -403,7 +413,7 @@ def set_figure_params(
frameon: bool = True,
vector_friendly: bool = True,
fontsize: int = 14,
- color_map: Optional[str] = None,
+ color_map: str | None = None,
format: _Format = "pdf",
transparent: bool = False,
ipython_format: str = "png2x",
@@ -414,18 +424,21 @@ def set_figure_params(
Parameters
----------
dpi
- Resolution of rendered figures – this influences the size of figures in notebooks.
+ Resolution of rendered figures – this influences the size of figures
+ in notebooks.
dpi_save
Resolution of saved figures. This should typically be higher to achieve
publication quality.
frameon
Add frames and axes labels to scatter plots.
vector_friendly
- Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.
+ Plot scatter plots using `png` backend even when exporting as
+ `pdf` or `svg`.
fontsize
Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.
color_map
- Convenience method for setting the default color map. Ignored if `scanpy=False`.
+ Convenience method for setting the default color map. Ignored if
+ `scanpy=False`.
format
This sets the default format for saving figures: `file_format_figs`.
transparent
@@ -438,9 +451,7 @@ def set_figure_params(
try:
import IPython
- if isinstance(ipython_format, str):
- ipython_format = [ipython_format]
- IPython.display.set_matplotlib_formats(*ipython_format)
+ IPython.display.set_matplotlib_formats(*[ipython_format])
except Exception:
pass
from matplotlib import rcParams
diff --git a/stlearn/add.py b/stlearn/add.py
index fde7173d..025a232a 100644
--- a/stlearn/add.py
+++ b/stlearn/add.py
@@ -1,10 +1,22 @@
+from .adds.add_deconvolution import add_deconvolution
from .adds.add_image import image
-from .adds.add_positions import positions
-from .adds.parsing import parsing
-from .adds.add_lr import lr
-from .adds.annotation import annotation
from .adds.add_labels import labels
-from .adds.add_deconvolution import add_deconvolution
-from .adds.add_mask import add_mask
-from .adds.add_mask import apply_mask
from .adds.add_loupe_clusters import add_loupe_clusters
+from .adds.add_lr import lr
+from .adds.add_mask import add_mask, apply_mask
+from .adds.add_positions import positions
+from .adds.annotation import annotation
+from .adds.parsing import parsing
+
+__all__ = [
+ "image",
+ "positions",
+ "parsing",
+ "lr",
+ "annotation",
+ "labels",
+ "add_deconvolution",
+ "add_mask",
+ "apply_mask",
+ "add_loupe_clusters",
+]
diff --git a/stlearn/adds/add_deconvolution.py b/stlearn/adds/add_deconvolution.py
index 3b5445be..5d892dda 100644
--- a/stlearn/adds/add_deconvolution.py
+++ b/stlearn/adds/add_deconvolution.py
@@ -1,16 +1,14 @@
-from typing import Optional, Union
-from anndata import AnnData
-import pandas as pd
-import numpy as np
from pathlib import Path
+import pandas as pd
+from anndata import AnnData
+
def add_deconvolution(
adata: AnnData,
- annotation_path: Union[Path, str],
+ annotation_path: Path | str,
copy: bool = False,
-) -> Optional[AnnData]:
-
+) -> AnnData | None:
"""\
Adding label transfered from Seurat
@@ -29,7 +27,11 @@ def add_deconvolution(
The annotation of cluster results.
"""
+ adata = adata.copy() if copy else adata
+
label = pd.read_csv(annotation_path, index_col=0)
label = label[adata.obs_names]
adata.obsm["deconvolution"] = label[adata.obs.index].T
+
+ return adata
diff --git a/stlearn/adds/add_image.py b/stlearn/adds/add_image.py
index 83c92d6b..20376ece 100644
--- a/stlearn/adds/add_image.py
+++ b/stlearn/adds/add_image.py
@@ -1,8 +1,8 @@
-from typing import Optional, Union
+import os
+from pathlib import Path
+
from anndata import AnnData
from matplotlib import pyplot as plt
-from pathlib import Path
-import os
from PIL import Image
Image.MAX_IMAGE_PIXELS = None
@@ -10,15 +10,14 @@
def image(
adata: AnnData,
- imgpath: Union[Path, str],
+ imgpath: Path | str | None,
library_id: str,
quality: str = "hires",
scale: float = 1.0,
visium: bool = False,
spot_diameter_fullres: float = 50,
copy: bool = False,
-) -> Optional[AnnData]:
-
+) -> AnnData | None:
"""\
Adding image data to the Anndata object
@@ -29,11 +28,13 @@ def image(
imgpath
Image path.
library_id
- Identifier for the visium library. Can be modified when concatenating multiple adata objects.
+ Identifier for the visium library. Can be modified when concatenating
+ multiple adata objects.
scale
Set scale factor.
quality
- Set quality that convert to stlearn to use. Store in anndata.obs['imagecol' & 'imagerow'].
+ Set quality that convert to stlearn to use. Store in
+ anndata.obs['imagecol' & 'imagerow'].
visium
Is this anndata read from Visium platform or not.
copy
@@ -44,6 +45,7 @@ def image(
**tissue_img** : `adata.uns` field
Array format of image, saving by Pillow package.
"""
+ adata = adata.copy() if copy else adata
if imgpath is not None and os.path.isfile(imgpath):
try:
@@ -68,8 +70,6 @@ def image(
adata.obs[["imagecol", "imagerow"]] = adata.obsm["spatial"] * scale
print("Added tissue image to the object!")
-
- return adata if copy else None
except:
raise ValueError(
f"""\
diff --git a/stlearn/adds/add_labels.py b/stlearn/adds/add_labels.py
index d4a05451..d11cad49 100644
--- a/stlearn/adds/add_labels.py
+++ b/stlearn/adds/add_labels.py
@@ -1,41 +1,47 @@
-from typing import Optional, Union
-from anndata import AnnData
-from pathlib import Path
-import os
-import pandas as pd
import numpy as np
+import pandas as pd
+from anndata import AnnData
from natsort import natsorted
def labels(
adata: AnnData,
- label_filepath: str = None,
+ label_filepath: str,
index_col: int = 0,
- use_label: str = None,
+ use_label: str | None = None,
sep: str = "\t",
copy: bool = False,
-) -> Optional[AnnData]:
+) -> AnnData | None:
"""\
Add label transfer results into AnnData object
Parameters
----------
- adata: AnnData The data object to add L-R info into
- label_filepath: str The path to the label transfer results file
- use_label: str Where to store the label_transfer results, defaults to 'predictions' in adata.obs & 'label_transfer' in adata.uns.
- sep: str Separator of the csv file
- copy: bool Copy flag indicating copy or direct edit
+ adata: AnnData
+ The data object to add L-R info into
+ label_filepath: str
+ The path to the label transfer results file
+ use_label: str
+ Where to store the label_transfer results, defaults to 'predictions'
+ in adata.obs & 'label_transfer' in adata.uns.
+ sep: str
+ Separator of the csv file
+ copy: bool
+ Copy flag indicating copy or direct edit
Returns
-------
- adata: AnnData The data object that L-R added into
+ adata: AnnData
+ The data object that L-R added into
"""
+ adata = adata.copy() if copy else adata
+
labels = pd.read_csv(label_filepath, index_col=index_col, sep=sep)
- uns_key = "label_transfer" if type(use_label) == type(None) else use_label
+ uns_key = "label_transfer" if use_label is None else use_label
adata.uns[uns_key] = labels.drop(["predicted.id", "prediction.score.max"], axis=1)
- key_add = "predictions" if type(use_label) == type(None) else use_label
+ key_add = "predictions" if use_label is None else use_label
key_source = "predicted.id"
adata.obs[key_add] = pd.Categorical(
values=np.array(labels[key_source]).astype("U"),
diff --git a/stlearn/adds/add_loupe_clusters.py b/stlearn/adds/add_loupe_clusters.py
index af614bd8..f257f80f 100644
--- a/stlearn/adds/add_loupe_clusters.py
+++ b/stlearn/adds/add_loupe_clusters.py
@@ -1,19 +1,17 @@
-from typing import Optional, Union
-from anndata import AnnData
-import pandas as pd
-import numpy as np
-import stlearn
from pathlib import Path
+
+import numpy as np
+import pandas as pd
+from anndata import AnnData
from natsort import natsorted
def add_loupe_clusters(
adata: AnnData,
- loupe_path: Union[Path, str],
+ loupe_path: Path | str,
key_add: str = "multiplex",
copy: bool = False,
-) -> Optional[AnnData]:
-
+) -> AnnData | None:
"""\
Adding label transfered from Seurat
@@ -36,9 +34,13 @@ def add_loupe_clusters(
The annotation of cluster results.
"""
+ adata = adata.copy() if copy else adata
+
label = pd.read_csv(loupe_path)
adata.obs[key_add] = pd.Categorical(
values=np.array(label[key_add]).astype("U"),
categories=natsorted(label[key_add].unique().astype("U")),
)
+
+ return adata if copy else None
diff --git a/stlearn/adds/add_lr.py b/stlearn/adds/add_lr.py
index 6ed99cde..d40d11a8 100644
--- a/stlearn/adds/add_lr.py
+++ b/stlearn/adds/add_lr.py
@@ -1,32 +1,35 @@
-from typing import Optional, Union
-from anndata import AnnData
-from pathlib import Path
-import os
import pandas as pd
+from anndata import AnnData
def lr(
adata: AnnData,
- db_filepath: str = None,
+ db_filepath: str,
sep: str = "\t",
source: str = "connectomedb",
copy: bool = False,
-) -> Optional[AnnData]:
+) -> AnnData | None:
"""Add significant Ligand-Receptor pairs into AnnData object
Parameters
----------
- adata: AnnData The data object to add L-R info into
- db_filepath: str The path to the CPDB results file
- sep: str Separator of the CPDB results file
- source: str Source of LR database (default: connectomedb, can also support 'cellphonedb')
- copy: bool Copy flag indicating copy or direct edit
+ adata: AnnData
+ The data object to add L-R info into
+ db_filepath: str
+ The path to the CPDB results file
+ sep: str
+ Separator of the CPDB results file
+ source: str
+ Source of LR database (default: connectomedb, can also support 'cellphonedb')
+ copy: bool
+ Copy flag indicating copy or direct edit
Returns
-------
adata: AnnData The data object that L-R added into
"""
+ adata = adata.copy() if copy else adata
if source == "cellphonedb":
cpdb = pd.read_csv(db_filepath, sep=sep)
diff --git a/stlearn/adds/add_mask.py b/stlearn/adds/add_mask.py
index 6608e00f..d25a488c 100644
--- a/stlearn/adds/add_mask.py
+++ b/stlearn/adds/add_mask.py
@@ -1,19 +1,18 @@
+import os
from pathlib import Path
+
import matplotlib
-from matplotlib import pyplot as plt
import numpy as np
-from typing import Optional, Union
from anndata import AnnData
-import os
-from stlearn._compat import Literal
+from matplotlib import pyplot as plt
def add_mask(
adata: AnnData,
- imgpath: Union[Path, str],
+ imgpath: Path | str,
key: str = "mask",
copy: bool = False,
-) -> Optional[AnnData]:
+) -> AnnData | None:
"""\
Adding binary mask image to the Anndata object
@@ -33,12 +32,14 @@ def add_mask(
**mask_image** : `adata.uns` field
Array format of image, saving by Pillow package.
"""
+ adata = adata.copy() if copy else adata
+
try:
library_id = list(adata.uns["spatial"].keys())[0]
quality = adata.uns["spatial"][library_id]["use_quality"]
except:
raise KeyError(
- f"""\
+ """\
Please read ST data first and try again
"""
)
@@ -59,8 +60,6 @@ def add_mask(
adata.uns["mask_image"][library_id][key][quality] = img
print("Added tissue mask to the object!")
-
- return adata if copy else None
except:
raise ValueError(
f"""\
@@ -78,11 +77,11 @@ def add_mask(
def apply_mask(
adata: AnnData,
- masks: Optional[list] = "all",
+ masks: list | str = "all",
select: str = "black",
- cmap: str = "default",
+ cmap_name: str = "default",
copy: bool = False,
-) -> Optional[AnnData]:
+) -> AnnData | None:
"""\
Parsing the old spaital transcriptomics data
@@ -106,19 +105,22 @@ def apply_mask(
Array format of image, saving by Pillow package.
"""
from scanpy.plotting import palettes
- from stlearn.plotting import palettes_st
- if cmap == "vega_10_scanpy":
+ from stlearn.pl import palettes_st
+
+ adata = adata.copy() if copy else adata
+
+ if cmap_name == "vega_10_scanpy":
cmap = palettes.vega_10_scanpy
- elif cmap == "vega_20_scanpy":
+ elif cmap_name == "vega_20_scanpy":
cmap = palettes.vega_20_scanpy
- elif cmap == "default_102":
+ elif cmap_name == "default_102":
cmap = palettes.default_102
- elif cmap == "default_28":
+ elif cmap_name == "default_28":
cmap = palettes.default_28
- elif cmap == "jana_40":
+ elif cmap_name == "jana_40":
cmap = palettes_st.jana_40
- elif cmap == "default":
+ elif cmap_name == "default":
cmap = palettes_st.default
else:
raise ValueError(
@@ -126,7 +128,6 @@ def apply_mask(
)
cmaps = matplotlib.colors.LinearSegmentedColormap.from_list("", cmap)
-
cmap_ = plt.cm.get_cmap(cmaps)
try:
@@ -134,7 +135,7 @@ def apply_mask(
quality = adata.uns["spatial"][library_id]["use_quality"]
except:
raise KeyError(
- f"""\
+ """\
Please read ST data first and try again
"""
)
@@ -163,16 +164,18 @@ def apply_mask(
mask_image = np.where(mask_image > 155, 0, 1)
else:
raise ValueError(
- f"""\
+ """\
Only support black and white mask yet.
"""
)
mask_image_2d = mask_image.mean(axis=2)
- apply_spot_mask = (
- lambda x: [i, mask]
- if mask_image_2d[int(x["imagerow"]), int(x["imagecol"])] == 1
- else [x[key + "_code"], x[key]]
- )
+
+ def apply_spot_mask(x):
+ if mask_image_2d[int(x["imagerow"]), int(x["imagecol"])] == 1:
+ return [i, mask]
+ else:
+ return [x[key + "_code"], x[key]]
+
spot_mask_df = adata.obs.apply(apply_spot_mask, axis=1, result_type="expand")
adata.obs[key + "_code"] = spot_mask_df[0]
adata.obs[key] = spot_mask_df[1]
diff --git a/stlearn/adds/add_positions.py b/stlearn/adds/add_positions.py
index 52872384..7b4c3cb7 100644
--- a/stlearn/adds/add_positions.py
+++ b/stlearn/adds/add_positions.py
@@ -1,18 +1,16 @@
-from typing import Optional, Union
-from anndata import AnnData
-import pandas as pd
from pathlib import Path
-import os
+
+import pandas as pd
+from anndata import AnnData
def positions(
adata: AnnData,
- position_filepath: Union[Path, str] = None,
- scale_filepath: Union[Path, str] = None,
+ position_filepath: Path | str,
+ scale_filepath: Path | str,
quality: str = "low",
copy: bool = False,
-) -> Optional[AnnData]:
-
+) -> AnnData | None:
"""\
Adding spatial information into the Anndata object
@@ -35,6 +33,8 @@ def positions(
Spatial information of the tissue image.
"""
+ adata = adata.copy() if copy else adata
+
tissue_positions = pd.read_csv(position_filepath, header=None)
tissue_positions.columns = [
"barcode",
diff --git a/stlearn/adds/annotation.py b/stlearn/adds/annotation.py
index a8bc1ac9..8f5df9db 100644
--- a/stlearn/adds/annotation.py
+++ b/stlearn/adds/annotation.py
@@ -1,16 +1,12 @@
-from typing import Optional, Union, List
from anndata import AnnData
-from matplotlib import pyplot as plt
-from pathlib import Path
-import os
def annotation(
adata: AnnData,
- label_list: List[str],
+ label_list: list[str],
use_label: str = "louvain",
copy: bool = False,
-) -> Optional[AnnData]:
+) -> AnnData | None:
"""\
Adding annotation for cluster
@@ -30,10 +26,11 @@ def annotation(
**[cluster method name]_anno** : `adata.obs` field
The annotation of cluster results.
"""
-
if label_list is None:
raise ValueError("Please give the label list!")
+ adata = adata.copy() if copy else adata
+
if len(label_list) != len(adata.obs[use_label].unique()):
raise ValueError("Please give the correct number of label list!")
diff --git a/stlearn/adds/parsing.py b/stlearn/adds/parsing.py
index d92932cc..0ae6a9f0 100644
--- a/stlearn/adds/parsing.py
+++ b/stlearn/adds/parsing.py
@@ -1,18 +1,14 @@
-from typing import Optional, Union
-from anndata import AnnData
-from matplotlib import pyplot as plt
-from pathlib import Path
-import os
-import sys
+from os import PathLike
+
import numpy as np
+from anndata import AnnData
def parsing(
adata: AnnData,
- coordinates_file: Union[Path, str],
+ coordinates_file: int | str | bytes | PathLike[str] | PathLike[bytes],
copy: bool = True,
-) -> Optional[AnnData]:
-
+) -> AnnData | None:
"""\
Parsing the old spaital transcriptomics data
@@ -33,7 +29,7 @@ def parsing(
# Get a map of the new coordinates
new_coordinates = dict()
- with open(coordinates_file, "r") as filehandler:
+ with open(coordinates_file) as filehandler:
for line in filehandler.readlines():
tokens = line.split()
assert len(tokens) >= 6 or len(tokens) == 4
@@ -52,6 +48,8 @@ def parsing(
"the coordinates file only contains 4 columns\n"
)
+ adata = adata.copy() if copy else adata
+
counts_table = adata.to_df()
new_index_values = list()
@@ -66,7 +64,7 @@ def parsing(
imgcol.append(new_x)
imgrow.append(new_y)
- new_index_values.append("{0}x{1}".format(new_x, new_y))
+ new_index_values.append(f"{new_x}x{new_y}")
except KeyError:
counts_table.drop(index, inplace=True)
@@ -80,7 +78,6 @@ def parsing(
adata.obs["imagecol"] = imgcol
adata.obs["imagerow"] = imgrow
-
adata.obsm["spatial"] = np.c_[[imgcol, imgrow]].reshape(-1, 2)
return adata if copy else None
diff --git a/stlearn/app/app.py b/stlearn/app/app.py
index 6eb6a5dc..d1e914f3 100644
--- a/stlearn/app/app.py
+++ b/stlearn/app/app.py
@@ -1,53 +1,36 @@
-import os, sys, subprocess
+import os
+import sys
+from threading import Thread
sys.path.append(os.path.dirname(__file__))
-try:
- import flask
-except ImportError:
- subprocess.call(
- "pip install -r " + os.path.dirname(__file__) + "//requirements.txt", shell=True
- )
+import asyncio
+import tempfile
+import numpy
+import numpy as np
+import scanpy
+from bokeh.application import Application
+from bokeh.application.handlers import FunctionHandler
+from bokeh.embed import server_document
+from bokeh.layouts import row
+from bokeh.server.server import Server
from flask import (
Flask,
- render_template,
- request,
flash,
- url_for,
redirect,
- session,
+ render_template,
+ request,
send_file,
+ url_for,
)
-from bokeh.embed import components
-from bokeh.plotting import figure
-from bokeh.resources import INLINE
+from tornado.ioloop import IOLoop
from werkzeug.utils import secure_filename
-import tempfile
-import traceback
-
-import tempfile
-import shutil
import stlearn
-import scanpy
-import numpy
-import numpy as np
-
-import asyncio
-from bokeh.server.server import BaseServer
-from bokeh.server.tornado import BokehTornado
-from tornado.httpserver import HTTPServer
-from tornado.ioloop import IOLoop
-from bokeh.application import Application
-from bokeh.application.handlers import FunctionHandler
-from bokeh.server.server import Server
-from bokeh.embed import server_document
-
-from bokeh.layouts import column, row
# Functions related to processing the forms.
-from source.forms import views # for changing data in response to input
+from stlearn.app.source.forms import views # for changing data in response to input
# Global variables.
@@ -171,7 +154,6 @@ def folder_uploader():
uploaded = []
i = 0
for file in files:
-
filename = secure_filename(file.filename)
if allow_files[0] in filename:
@@ -243,7 +225,6 @@ def folder_uploader():
@app.route("/file_uploader", methods=["GET", "POST"])
def file_uploader():
if request.method == "POST":
-
global adata, step_log
# Clean uploads folder before upload a new data
@@ -385,7 +366,7 @@ def save_adata():
def modify_doc_gene_plot(doc):
- from stlearn.plotting.classes_bokeh import BokehGenePlot
+ from stlearn.pl.classes_bokeh import BokehGenePlot
gp_object = BokehGenePlot(adata)
doc.add_root(row(gp_object.layout, width=800))
@@ -402,7 +383,7 @@ def modify_doc_gene_plot(doc):
def modify_doc_cluster_plot(doc):
- from stlearn.plotting.classes_bokeh import BokehClusterPlot
+ from stlearn.pl.classes_bokeh import BokehClusterPlot
gp_object = BokehClusterPlot(adata)
doc.add_root(row(gp_object.layout, width=800))
@@ -423,7 +404,7 @@ def modify_doc_cluster_plot(doc):
def modify_doc_spatial_cci_plot(doc):
- from stlearn.plotting.classes_bokeh import BokehSpatialCciPlot
+ from stlearn.pl.classes_bokeh import BokehSpatialCciPlot
gp_object = BokehSpatialCciPlot(adata)
doc.add_root(row(gp_object.layout, width=800))
@@ -439,7 +420,7 @@ def modify_doc_spatial_cci_plot(doc):
def modify_doc_lr_plot(doc):
- from stlearn.plotting.classes_bokeh import BokehLRPlot
+ from stlearn.pl.classes_bokeh import BokehLRPlot
gp_object = BokehLRPlot(adata)
doc.add_root(row(gp_object.layout, width=800))
@@ -453,7 +434,7 @@ def modify_doc_lr_plot(doc):
def modify_doc_annotate_plot(doc):
- from stlearn.plotting.classes_bokeh import Annotate
+ from stlearn.pl.classes_bokeh import Annotate
gp_object = Annotate(adata)
doc.add_root(row(gp_object.layout, width=800))
@@ -491,12 +472,10 @@ def bk_worker():
"/bokeh_annotate_plot": bkapp4,
},
io_loop=IOLoop(),
- allow_websocket_origin=["127.0.0.1:5000", "localhost:5000"],
+ allow_websocket_origin=["127.0.0.1:3000", "localhost:3000"],
)
server.start()
server.io_loop.start()
-from threading import Thread
-
Thread(target=bk_worker).start()
diff --git a/stlearn/app/cli.py b/stlearn/app/cli.py
index 20154df4..78bfe02b 100644
--- a/stlearn/app/cli.py
+++ b/stlearn/app/cli.py
@@ -1,7 +1,8 @@
+import errno
+
import click
-from .. import __version__
-import os
+from .. import __version__
@click.group(
@@ -18,7 +19,6 @@
help="Show the software version and exit.",
)
def main():
- os._exit
click.echo("Please run `stlearn launch` to start the web app")
@@ -27,10 +27,14 @@ def launch():
from .app import app
try:
- app.run(host="0.0.0.0", port=5000, debug=True, use_reloader=False)
+ app.run(host="0.0.0.0", port=3000, debug=True, use_reloader=False)
except OSError as e:
if e.errno == errno.EADDRINUSE:
raise click.ClickException(
"Port is in use, please specify an open port using the --port flag."
) from e
raise
+
+
+if __name__ == "__main__":
+ main()
diff --git a/stlearn/app/source/forms/form_validators.py b/stlearn/app/source/forms/form_validators.py
index 3a82f887..4a279164 100644
--- a/stlearn/app/source/forms/form_validators.py
+++ b/stlearn/app/source/forms/form_validators.py
@@ -1,16 +1,15 @@
-""" Contains different kinds of form validators.
-"""
+"""Contains different kinds of form validators."""
+
from wtforms.validators import ValidationError
-class CheckNumberRange(object):
+class CheckNumberRange:
def __init__(self, lower, upper, hint=""):
self.lower = lower
self.upper = upper
self.hint = hint
def __call__(self, form, field):
-
if field.data is not None:
if not (self.lower <= float(field.data) <= self.upper):
if self.hint:
diff --git a/stlearn/app/source/forms/forms.py b/stlearn/app/source/forms/forms.py
index 0eef6b1d..466c1da1 100644
--- a/stlearn/app/source/forms/forms.py
+++ b/stlearn/app/source/forms/forms.py
@@ -1,64 +1,63 @@
"""Purpose of this script is to create general forms that are programmable with
- particular input. Will impliment forms for subsetting the data and
- visualisation options in a general way so can be used with any
- SingleCellAnalysis dataset.
+particular input. Will implement forms for subsetting the data and
+visualisation options in a general way so can be used with any
+SingleCellAnalysis dataset.
"""
-import sys
+import wtforms
from flask_wtf import FlaskForm
# from flask_wtf.file import FileField
-from wtforms import SelectMultipleField, SelectField
-import wtforms
+from wtforms import SelectField, SelectMultipleField
def createSuperForm(elements, element_fields, element_values, validators=None):
""" Creates a general form; goal is to create a fully programmable form \
- that essentially governs all the options the user will select.
+ that essentially governs all the options the user will select.
- Args:
- elements (list): Element names to be rendered on the page, in \
- order of how they will appear on the page.
+ Args:
+ elements (list): Element names to be rendered on the page, in \
+ order of how they will appear on the page.
- element_fields (list): The names of the fields to be rendered. \
- Each field is in same order as 'elements'. \
- Currently supported are: \
- 'Title', 'SelectMultipleField', 'SelectField', \
- 'StringField', 'Text', 'List'.
+ element_fields (list): The names of the fields to be rendered. \
+ Each field is in same order as 'elements'. \
+ Currently supported are: \
+ 'Title', 'SelectMultipleField', 'SelectField', \
+ 'StringField', 'Text', 'List'.
- element_values (list