From c7343a36aa53f5cb1d5d5d3871bed7c8788988fb Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Sun, 24 Aug 2025 11:29:45 -0700 Subject: [PATCH 01/15] Modified poetry makefile target to run arbitrary commands --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index be76863..1942262 100644 --- a/Makefile +++ b/Makefile @@ -48,8 +48,8 @@ migrate: clean-api ## Run database migrations; or specify a revision: `make migrate-new: clean-api ## Autogenerate a new database migration: `make migrate-new ARGS='Description here'` $(DOCKER_COMPOSE) run --rm -u root -w /code --entrypoint alembic api revision --autogenerate -m "$(ARGS)" -poetry-add: clean-api ## Add a poetry dependency: `make poetry-add ARGS='pytest --group dev'` - $(DOCKER_COMPOSE) run --rm -e STANDALONE=true --no-deps -u root -w /code --entrypoint poetry api add $(ARGS) +poetry-%: clean-api ## Run arbitrary poetry actions with support for optional ARGS; e.g. `make poetry-lock` + $(DOCKER_COMPOSE) run --rm -e STANDALONE=true --no-deps -u root -w /code --entrypoint poetry api $* $(ARGS) # This ensures that even if they pass in an empty value, we default to parsing the "api" folder ifndef FILEPATH From 2a7e27b58834fe590286ce907214a04f0b255e2b Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Sun, 24 Aug 2025 11:30:29 -0700 Subject: [PATCH 02/15] Upgraded to SQLAlchemy 1.4 Closes #91. 
--- api/db.py | 4 -- poetry.lock | 181 +++++++++++++++++++++++++++++++++++++------------ pyproject.toml | 2 +- 3 files changed, 137 insertions(+), 50 deletions(-) diff --git a/api/db.py b/api/db.py index 48c0935..5982076 100644 --- a/api/db.py +++ b/api/db.py @@ -8,7 +8,6 @@ class SomeModel(db.AlchemyBase): from sqlalchemy import ( BigInteger, - Binary, Boolean, Column, Date, @@ -73,7 +72,6 @@ class SomeModel(db.AlchemyBase): within_group, ) from sqlalchemy.dialects.postgresql import JSONB, TIMESTAMP, UUID -from sqlalchemy.engine import RowProxy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import ( @@ -106,7 +104,6 @@ class SomeModel(db.AlchemyBase): BigInteger, Integer, SmallInteger, - Binary, LargeBinary, Boolean, Date, @@ -171,7 +168,6 @@ class SomeModel(db.AlchemyBase): Table, UniqueConstraint, Query, - RowProxy, hybrid_property, # ORM flag_modified, diff --git a/poetry.lock b/poetry.lock index 72578d0..0877a81 100644 --- a/poetry.lock +++ b/poetry.lock @@ -565,6 +565,75 @@ files = [ [package.dependencies] python-dateutil = ">=2.7" +[[package]] +name = "greenlet" +version = "3.2.4" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +files = [ + {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, + {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, + {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, + {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, + {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, + {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, + {file = 
"greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"}, + {file = 
"greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, + {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, + {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, + {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = 
"sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, + {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] + [[package]] name = "gunicorn" version = "23.0.0" @@ -1152,7 +1221,7 @@ astroid = ">=3.3.8,<=3.4.0.dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, + {version = ">=0.3.6", markers = "python_version == \"3.11\""}, ] isort = ">=4.2.5,<5.13 || >5.13,<7" mccabe = ">=0.6,<0.8" @@ -1357,7 +1426,7 @@ ecdsa = ">=0.19.1,<1" python-http-client = ">=3.2.1" werkzeug = [ {version = ">=2.3.5", markers = "python_version >= \"3.12\""}, - {version = ">=2.2.0", markers = "python_version == \"3.11\""}, + {version = ">=2.2.0", markers = "python_version >= \"3.11\""}, ] [[package]] @@ -1386,59 +1455,81 @@ files = [ [[package]] name = "sqlalchemy" -version = "1.3.24" +version = "1.4.54" description = "Database Abstraction Library" optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" groups = ["main", "dev"] files = [ - {file = "SQLAlchemy-1.3.24-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:87a2725ad7d41cd7376373c15fd8bf674e9c33ca56d0b8036add2d634dba372e"}, - {file = "SQLAlchemy-1.3.24-cp27-cp27m-win32.whl", hash = "sha256:f597a243b8550a3a0b15122b14e49d8a7e622ba1c9d29776af741f1845478d79"}, - {file = "SQLAlchemy-1.3.24-cp27-cp27m-win_amd64.whl", hash = "sha256:fc4cddb0b474b12ed7bdce6be1b9edc65352e8ce66bc10ff8cbbfb3d4047dbf4"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-macosx_10_14_x86_64.whl", hash = 
"sha256:f1149d6e5c49d069163e58a3196865e4321bad1803d7886e07d8710de392c548"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:14f0eb5db872c231b20c18b1e5806352723a3a89fb4254af3b3e14f22eaaec75"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:e98d09f487267f1e8d1179bf3b9d7709b30a916491997137dd24d6ae44d18d79"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:fc1f2a5a5963e2e73bac4926bdaf7790c4d7d77e8fc0590817880e22dd9d0b8b"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-win32.whl", hash = "sha256:f3c5c52f7cb8b84bfaaf22d82cb9e6e9a8297f7c2ed14d806a0f5e4d22e83fb7"}, - {file = "SQLAlchemy-1.3.24-cp35-cp35m-win_amd64.whl", hash = "sha256:0352db1befcbed2f9282e72843f1963860bf0e0472a4fa5cf8ee084318e0e6ab"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:2ed6343b625b16bcb63c5b10523fd15ed8934e1ed0f772c534985e9f5e73d894"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:34fcec18f6e4b24b4a5f6185205a04f1eab1e56f8f1d028a2a03694ebcc2ddd4"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:e47e257ba5934550d7235665eee6c911dc7178419b614ba9e1fbb1ce6325b14f"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:816de75418ea0953b5eb7b8a74933ee5a46719491cd2b16f718afc4b291a9658"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-win32.whl", hash = "sha256:26155ea7a243cbf23287f390dba13d7927ffa1586d3208e0e8d615d0c506f996"}, - {file = "SQLAlchemy-1.3.24-cp36-cp36m-win_amd64.whl", hash = "sha256:f03bd97650d2e42710fbe4cf8a59fae657f191df851fc9fc683ecef10746a375"}, - {file = "SQLAlchemy-1.3.24-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:a006d05d9aa052657ee3e4dc92544faae5fcbaafc6128217310945610d862d39"}, - {file = "SQLAlchemy-1.3.24-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:1e2f89d2e5e3c7a88e25a3b0e43626dba8db2aa700253023b82e630d12b37109"}, - {file = 
"SQLAlchemy-1.3.24-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:0d5d862b1cfbec5028ce1ecac06a3b42bc7703eb80e4b53fceb2738724311443"}, - {file = "SQLAlchemy-1.3.24-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:0172423a27fbcae3751ef016663b72e1a516777de324a76e30efa170dbd3dd2d"}, - {file = "SQLAlchemy-1.3.24-cp37-cp37m-win32.whl", hash = "sha256:d37843fb8df90376e9e91336724d78a32b988d3d20ab6656da4eb8ee3a45b63c"}, - {file = "SQLAlchemy-1.3.24-cp37-cp37m-win_amd64.whl", hash = "sha256:c10ff6112d119f82b1618b6dc28126798481b9355d8748b64b9b55051eb4f01b"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:861e459b0e97673af6cc5e7f597035c2e3acdfb2608132665406cded25ba64c7"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:5de2464c254380d8a6c20a2746614d5a436260be1507491442cf1088e59430d2"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d375d8ccd3cebae8d90270f7aa8532fe05908f79e78ae489068f3b4eee5994e8"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:014ea143572fee1c18322b7908140ad23b3994036ef4c0d630110faf942652f8"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-win32.whl", hash = "sha256:6607ae6cd3a07f8a4c3198ffbf256c261661965742e2b5265a77cd5c679c9bba"}, - {file = "SQLAlchemy-1.3.24-cp38-cp38-win_amd64.whl", hash = "sha256:fcb251305fa24a490b6a9ee2180e5f8252915fb778d3dafc70f9cc3f863827b9"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:01aa5f803db724447c1d423ed583e42bf5264c597fd55e4add4301f163b0be48"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:4d0e3515ef98aa4f0dc289ff2eebb0ece6260bbf37c2ea2022aad63797eacf60"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:bce28277f308db43a6b4965734366f533b3ff009571ec7ffa583cb77539b84d6"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-manylinux2014_aarch64.whl", hash = 
"sha256:8110e6c414d3efc574543109ee618fe2c1f96fa31833a1ff36cc34e968c4f233"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-win32.whl", hash = "sha256:ee5f5188edb20a29c1cc4a039b074fdc5575337c9a68f3063449ab47757bb064"}, - {file = "SQLAlchemy-1.3.24-cp39-cp39-win_amd64.whl", hash = "sha256:09083c2487ca3c0865dc588e07aeaa25416da3d95f7482c07e92f47e080aa17b"}, - {file = "SQLAlchemy-1.3.24.tar.gz", hash = "sha256:ebbb777cbf9312359b897bf81ba00dae0f5cb69fba2a18265dcc18a6f5ef7519"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:af00236fe21c4d4f4c227b6ccc19b44c594160cc3ff28d104cdce85855369277"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1183599e25fa38a1a322294b949da02b4f0da13dbc2688ef9dbe746df573f8a6"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1990d5a6a5dc358a0894c8ca02043fb9a5ad9538422001fb2826e91c50f1d539"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:14b3f4783275339170984cadda66e3ec011cce87b405968dc8d51cf0f9997b0d"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b24364150738ce488333b3fb48bfa14c189a66de41cd632796fbcacb26b4585"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-win32.whl", hash = "sha256:a8a72259a1652f192c68377be7011eac3c463e9892ef2948828c7d58e4829988"}, + {file = "SQLAlchemy-1.4.54-cp310-cp310-win_amd64.whl", hash = "sha256:b67589f7955924865344e6eacfdcf70675e64f36800a576aa5e961f0008cde2a"}, + {file = "SQLAlchemy-1.4.54-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b05e0626ec1c391432eabb47a8abd3bf199fb74bfde7cc44a26d2b1b352c2c6e"}, + {file = "SQLAlchemy-1.4.54-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:13e91d6892b5fcb94a36ba061fb7a1f03d0185ed9d8a77c84ba389e5bb05e936"}, + {file = "SQLAlchemy-1.4.54-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb59a11689ff3c58e7652260127f9e34f7f45478a2f3ef831ab6db7bcd72108f"}, + {file = "SQLAlchemy-1.4.54-cp311-cp311-win32.whl", hash = "sha256:1390ca2d301a2708fd4425c6d75528d22f26b8f5cbc9faba1ddca136671432bc"}, + {file = "SQLAlchemy-1.4.54-cp311-cp311-win_amd64.whl", hash = "sha256:2b37931eac4b837c45e2522066bda221ac6d80e78922fb77c75eb12e4dbcdee5"}, + {file = "SQLAlchemy-1.4.54-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3f01c2629a7d6b30d8afe0326b8c649b74825a0e1ebdcb01e8ffd1c920deb07d"}, + {file = "SQLAlchemy-1.4.54-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c24dd161c06992ed16c5e528a75878edbaeced5660c3db88c820f1f0d3fe1f4"}, + {file = "SQLAlchemy-1.4.54-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5e0d47d619c739bdc636bbe007da4519fc953393304a5943e0b5aec96c9877c"}, + {file = "SQLAlchemy-1.4.54-cp312-cp312-win32.whl", hash = "sha256:12bc0141b245918b80d9d17eca94663dbd3f5266ac77a0be60750f36102bbb0f"}, + {file = "SQLAlchemy-1.4.54-cp312-cp312-win_amd64.whl", hash = "sha256:f941aaf15f47f316123e1933f9ea91a6efda73a161a6ab6046d1cde37be62c88"}, + {file = "SQLAlchemy-1.4.54-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:a41611835010ed4ea4c7aed1da5b58aac78ee7e70932a91ed2705a7b38e40f52"}, + {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e8c1b9ecaf9f2590337d5622189aeb2f0dbc54ba0232fa0856cf390957584a9"}, + {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0de620f978ca273ce027769dc8db7e6ee72631796187adc8471b3c76091b809e"}, + {file = 
"SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c5a2530400a6e7e68fd1552a55515de6a4559122e495f73554a51cedafc11669"}, + {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cf7076c8578b3de4e43a046cc7a1af8466e1c3f5e64167189fe8958a4f9c02"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:f1e1b92ee4ee9ffc68624ace218b89ca5ca667607ccee4541a90cc44999b9aea"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41cffc63c7c83dfc30c4cab5b4308ba74440a9633c4509c51a0c52431fb0f8ab"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5933c45d11cbd9694b1540aa9076816cc7406964c7b16a380fd84d3a5fe3241"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cafe0ba3a96d0845121433cffa2b9232844a2609fce694fcc02f3f31214ece28"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a19f816f4702d7b1951d7576026c7124b9bfb64a9543e571774cf517b7a50b29"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-win32.whl", hash = "sha256:76c2ba7b5a09863d0a8166fbc753af96d561818c572dbaf697c52095938e7be4"}, + {file = "SQLAlchemy-1.4.54-cp37-cp37m-win_amd64.whl", hash = "sha256:a86b0e4be775902a5496af4fb1b60d8a2a457d78f531458d294360b8637bb014"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:a49730afb716f3f675755afec109895cab95bc9875db7ffe2e42c1b1c6279482"}, + {file = 
"SQLAlchemy-1.4.54-cp38-cp38-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26e78444bc77d089e62874dc74df05a5c71f01ac598010a327881a48408d0064"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02d2ecb9508f16ab9c5af466dfe5a88e26adf2e1a8d1c56eb616396ccae2c186"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:394b0135900b62dbf63e4809cdc8ac923182af2816d06ea61cd6763943c2cc05"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed3576675c187e3baa80b02c4c9d0edfab78eff4e89dd9da736b921333a2432"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-win32.whl", hash = "sha256:fc9ffd9a38e21fad3e8c5a88926d57f94a32546e937e0be46142b2702003eba7"}, + {file = "SQLAlchemy-1.4.54-cp38-cp38-win_amd64.whl", hash = "sha256:a01bc25eb7a5688656c8770f931d5cb4a44c7de1b3cec69b84cc9745d1e4cc10"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:0b76bbb1cbae618d10679be8966f6d66c94f301cfc15cb49e2f2382563fb6efb"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdb2886c0be2c6c54d0651d5a61c29ef347e8eec81fd83afebbf7b59b80b7393"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:954816850777ac234a4e32b8c88ac1f7847088a6e90cfb8f0e127a1bf3feddff"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1d83cd1cc03c22d922ec94d0d5f7b7c96b1332f5e122e81b1a61fb22da77879a"}, + {file = 
"SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1576fba3616f79496e2f067262200dbf4aab1bb727cd7e4e006076686413c80c"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-win32.whl", hash = "sha256:3112de9e11ff1957148c6de1df2bc5cc1440ee36783412e5eedc6f53638a577d"}, + {file = "SQLAlchemy-1.4.54-cp39-cp39-win_amd64.whl", hash = "sha256:6da60fb24577f989535b8fc8b2ddc4212204aaf02e53c4c7ac94ac364150ed08"}, + {file = "sqlalchemy-1.4.54.tar.gz", hash = "sha256:4470fbed088c35dc20b78a39aaf4ae54fe81790c783b3264872a0224f437c31a"}, ] +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} + [package.extras] +aiomysql = ["aiomysql (>=0.2.0) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] +aiosqlite = ["aiosqlite ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17) ; python_version >= \"3\""] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\"", "mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\""] mssql = ["pyodbc"] -mssql-pymssql = ["pymssql"] -mssql-pyodbc = ["pyodbc"] -mysql = ["mysqlclient"] -oracle = ["cx-oracle"] -postgresql = ["psycopg2"] -postgresql-pg8000 = ["pg8000 (<1.16.6)"] +mssql-pymssql = ["pymssql", "pymssql"] +mssql-pyodbc = ["pyodbc", "pyodbc"] +mypy = ["mypy (>=0.910) ; python_version >= \"3\"", "sqlalchemy2-stubs"] +mysql = ["mysqlclient (>=1.4.0) ; python_version >= \"3\"", "mysqlclient (>=1.4.0,<2) ; python_version < \"3\""] +mysql-connector = 
["mysql-connector-python", "mysql-connector-python"] +oracle = ["cx_oracle (>=7) ; python_version >= \"3\"", "cx_oracle (>=7,<8) ; python_version < \"3\""] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg ; python_version >= \"3\"", "asyncpg ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] +postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0) ; python_version >= \"3\"", "pg8000 (>=1.16.6,!=1.29.0) ; python_version >= \"3\""] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] pymysql = ["pymysql (<1) ; python_version < \"3\"", "pymysql ; python_version >= \"3\""] +sqlcipher = ["sqlcipher3_binary ; python_version >= \"3\""] [[package]] name = "sqlalchemy-utils" @@ -1619,4 +1710,4 @@ watchdog = ["watchdog (>=2.3)"] [metadata] lock-version = "2.1" python-versions = "^3.11.9" -content-hash = "4eefee122f4a9d9c661f6cb04dec9f0c8daf3c5785628e621b8b8e7ca1fdcd82" +content-hash = "85bacd1430c1e8ee85e5cd96b72f7c87251dfb2e30c33811a3fc94577c75dcd7" diff --git a/pyproject.toml b/pyproject.toml index 09b17d4..4ada52e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ packages =[ python = "^3.11.9" fastapi = "^0.116.1" gunicorn = "^23.0.0" -sqlalchemy = "~1.3.23" +sqlalchemy = "^1.4.0" alembic = "^1.4.2" psycopg2 = "^2.8.5" python-jose = {extras = ["cryptography"], version = "^3.2.0"} From c3339e730a443b2ac4ea8f5fc447a4edda777160 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Sun, 24 Aug 2025 11:40:54 -0700 Subject: [PATCH 03/15] Fixed deprecation warnings --- api/db.py | 5 +++-- api/tests/conftest.py | 3 ++- api/views/cards.py | 6 ++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/db.py b/api/db.py index 5982076..86649be 100644 --- a/api/db.py +++ b/api/db.py @@ -72,7 +72,6 @@ class SomeModel(db.AlchemyBase): within_group, ) from sqlalchemy.dialects.postgresql import JSONB, TIMESTAMP, UUID -from 
sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import ( Query, @@ -81,6 +80,7 @@ class SomeModel(db.AlchemyBase): backref, contains_eager, joinedload, + registry, relationship, sessionmaker, ) @@ -190,6 +190,7 @@ class SomeModel(db.AlchemyBase): "pk": "pk_%(table_name)s", } ) -AlchemyBase = declarative_base(metadata=meta) +mapper_registry = registry(metadata=meta) +AlchemyBase = mapper_registry.generate_base() UTCTimestamp = TIMESTAMP(timezone=True) diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 9f71dd5..e23cb2d 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -50,7 +50,8 @@ def session_local(): create_database(test_engine.url) TestSessionLocal = sessionmaker(bind=test_engine) # Install necessary pgcrypto extension (for database-level default UUIDs) - test_engine.execute("create extension pgcrypto") + with test_engine.connect() as connection: + connection.execute(db.text("create extension pgcrypto")) # Create all tables db.AlchemyBase.metadata.create_all(bind=test_engine) try: diff --git a/api/views/cards.py b/api/views/cards.py index 71290c6..c19db73 100644 --- a/api/views/cards.py +++ b/api/views/cards.py @@ -133,10 +133,8 @@ def list_cards( if show_legacy and releases is CardsFilterRelease.phg: query = query.filter(Release.is_phg.is_(True)) elif releases is CardsFilterRelease.mine and not current_user.is_anonymous(): - my_release_subquery = ( - session.query(UserRelease.release_id) - .filter(UserRelease.user_id == current_user.id) - .subquery() + my_release_subquery = session.query(UserRelease.release_id).filter( + UserRelease.user_id == current_user.id ) query = query.filter(Card.release_id.in_(my_release_subquery)) elif r: From 3f37b221fa50000828ea60d78b2a4be828a62671 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Tue, 26 Aug 2025 21:47:23 -0700 Subject: [PATCH 04/15] Rewrote conftest nested transaction logic to work with SQLAlchemy 1.4 --- 
api/tests/cards/conftest.py | 16 ++++++++++------ api/tests/conftest.py | 22 +++++++++++----------- api/tests/decks/conftest.py | 15 +++++++++------ 3 files changed, 30 insertions(+), 23 deletions(-) diff --git a/api/tests/cards/conftest.py b/api/tests/cards/conftest.py index 043aa1a..57fec1f 100644 --- a/api/tests/cards/conftest.py +++ b/api/tests/cards/conftest.py @@ -1,4 +1,5 @@ import pytest +from sqlalchemy.engine import Engine from api import db, models from api.db import Session @@ -135,23 +136,26 @@ def _create_cards_for_filtration(session: db.Session, is_legacy=False): @pytest.fixture(scope="package") -def cards_session(session_local: Session, monkeypatch_package) -> Session: +def cards_session(test_engine: Engine, monkeypatch_package) -> Session: """Populate our database with the cards needed for listing tests. This causes our session to be reused between all tests in this package. """ - # Creates a nested transaction that includes standard card data - session = session_local() - session.begin_nested() + # Create a nested transaction that includes standard card data + connection = test_engine.connect() + cards_transaction = connection.begin() + session = Session(bind=connection) # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction monkeypatch_package.setattr(session, "commit", session.flush) + # Create our fake cards that are relied on by the tests in this module _create_cards_for_filtration(session, is_legacy=True) _create_cards_for_filtration(session) + try: yield session finally: - session.rollback() - session.close() + cards_transaction.rollback() + connection.close() @pytest.fixture(scope="function") diff --git a/api/tests/conftest.py b/api/tests/conftest.py index e23cb2d..7780e51 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -13,7 +13,8 @@ def test_endpoint(client: TestClient): from _pytest.monkeypatch import MonkeyPatch from fastapi.testclient import TestClient from sqlalchemy import 
create_engine -from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy.engine import Engine +from sqlalchemy.orm import Session from sqlalchemy_utils import create_database, database_exists, drop_database import api.environment @@ -34,7 +35,7 @@ def testing_environment(monkeypatch): @pytest.fixture(scope="session") -def session_local(): +def test_engine(): """Override the default database with our testing database, and make sure to run migrations""" settings = api.environment.ApplicationSettings() test_engine = create_engine( @@ -48,30 +49,29 @@ def session_local(): if database_exists(test_engine.url): drop_database(test_engine.url) create_database(test_engine.url) - TestSessionLocal = sessionmaker(bind=test_engine) # Install necessary pgcrypto extension (for database-level default UUIDs) with test_engine.connect() as connection: connection.execute(db.text("create extension pgcrypto")) # Create all tables db.AlchemyBase.metadata.create_all(bind=test_engine) try: - yield TestSessionLocal + yield test_engine finally: drop_database(test_engine.url) @pytest.fixture(scope="function") -def session(session_local: Session, monkeypatch) -> Session: - """Return an SQLAlchemy session for this test""" - session = session_local() - session.begin_nested() - # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction - monkeypatch.setattr(session, "commit", session.flush) +def session(test_engine: Engine, monkeypatch) -> Session: + """Return an SQLAlchemy session for this test, complete with SAVEPOINT for internal rollbacks""" + connection = test_engine.connect() + transaction = connection.begin() + session = Session(bind=connection) try: yield session finally: - session.rollback() session.close() + transaction.rollback() + connection.close() @pytest.fixture(scope="function") diff --git a/api/tests/decks/conftest.py b/api/tests/decks/conftest.py index e10e00d..bbe78b0 100644 --- a/api/tests/decks/conftest.py +++ 
b/api/tests/decks/conftest.py @@ -1,4 +1,5 @@ import pytest +from sqlalchemy.engine import Engine from api.db import Session @@ -6,23 +7,25 @@ @pytest.fixture(scope="package") -def cards_session(session_local: Session, monkeypatch_package) -> Session: +def cards_session(test_engine: Engine, monkeypatch_package) -> Session: """Populate our database with the cards needed to create decks once for the package This causes our session to be reused between all tests in this package, with specific classes handling deck/user data persistence using nested rollbacks. """ - # Creates a nested transaction that includes standard card data - session = session_local() - session.begin_nested() + connection = test_engine.connect() + cards_transaction = connection.begin() + session = Session(bind=connection) # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction monkeypatch_package.setattr(session, "commit", session.flush) + # Create our fake cards that are relied on by the tests in this module create_cards_for_decks(session) + try: yield session finally: - session.rollback() - session.close() + cards_transaction.rollback() + connection.close() @pytest.fixture(scope="module") From b5770aed1d0966f0dc1156b654728bef4f1dee1c Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Tue, 26 Aug 2025 22:07:28 -0700 Subject: [PATCH 05/15] Made necessary updates for SQLAlchemy 2.0 compatibility --- api/models/card.py | 2 +- api/services/deck.py | 14 ++++++++------ api/services/releases.py | 4 +--- api/tests/conftest.py | 3 ++- api/utils/pagination.py | 5 ++++- api/views/comments.py | 2 +- api/views/decks.py | 36 +++++++++++++++++++----------------- 7 files changed, 36 insertions(+), 30 deletions(-) diff --git a/api/models/card.py b/api/models/card.py index af52cd0..d25cf61 100644 --- a/api/models/card.py +++ b/api/models/card.py @@ -105,7 +105,7 @@ def type_weight(self): @type_weight.expression def type_weight(cls): return db.case( - [ + *[ (cls.card_type == value, 
index) for index, value in enumerate(CARD_TYPE_ORDER) ], diff --git a/api/services/deck.py b/api/services/deck.py index 855eb95..cfa52b7 100644 --- a/api/services/deck.py +++ b/api/services/deck.py @@ -1,6 +1,7 @@ from collections import defaultdict from operator import itemgetter +from sqlalchemy import select from starlette.requests import Request from api import db @@ -74,15 +75,16 @@ def create_or_update_deck( # Tracks if dice or cards changed, as this necessitates resetting the export flag needs_new_export = False if deck_id: - deck = ( - session.query(Deck) + stmt = ( + select(Deck) .options( - db.joinedload("cards"), - db.joinedload("dice"), - db.joinedload("selected_cards"), + db.joinedload(Deck.cards), + db.joinedload(Deck.dice), + db.joinedload(Deck.selected_cards), ) - .get(deck_id) + .where(Deck.id == deck_id) ) + deck = session.execute(stmt).unique().scalar_one() deck.title = title deck.description = description deck.phoenixborn_id = phoenixborn.id diff --git a/api/services/releases.py b/api/services/releases.py index 9d2c9b9..4c169d4 100644 --- a/api/services/releases.py +++ b/api/services/releases.py @@ -15,9 +15,7 @@ def get_releases_query(session: db.Session, current_user: UserType, show_legacy= Release.stub, Release.is_legacy, db.case( - [ - (UserRelease.release_id == Release.id, True), - ], + (UserRelease.release_id == Release.id, True), else_=False, ).label("is_mine"), ).outerjoin( diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 7780e51..8e328de 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -51,7 +51,8 @@ def test_engine(): create_database(test_engine.url) # Install necessary pgcrypto extension (for database-level default UUIDs) with test_engine.connect() as connection: - connection.execute(db.text("create extension pgcrypto")) + with connection.begin(): + connection.execute(db.text("create extension pgcrypto")) # Create all tables db.AlchemyBase.metadata.create_all(bind=test_engine) try: diff --git 
a/api/utils/pagination.py b/api/utils/pagination.py index 8d3db4b..22f0986 100644 --- a/api/utils/pagination.py +++ b/api/utils/pagination.py @@ -1,5 +1,8 @@ import inspect import urllib.parse +from typing import Any + +from sqlalchemy.orm import Query from api import db from api.environment import settings @@ -24,7 +27,7 @@ def replace_offset(url: str, offset: int) -> str: def paginated_results_for_query( - query: db.Query, + query: Query, paging: PaginationOptions, url: str, ) -> dict: diff --git a/api/views/comments.py b/api/views/comments.py index 88d66b3..179a704 100644 --- a/api/views/comments.py +++ b/api/views/comments.py @@ -61,7 +61,7 @@ def get_comments( """ query = ( session.query(Comment) - .options(db.joinedload("user")) + .options(db.joinedload(Comment.user)) .filter(Comment.source_entity_id == entity_id) .order_by(getattr(Comment.created, order)()) ) diff --git a/api/views/decks.py b/api/views/decks.py index 7242116..0fae085 100644 --- a/api/views/decks.py +++ b/api/views/decks.py @@ -5,7 +5,7 @@ import httpx from fastapi import APIRouter, Depends, Query, Request, Response, status from pydantic import UUID4 -from sqlalchemy import and_, or_ +from sqlalchemy import and_, or_, select from api import db from api.depends import ( @@ -221,7 +221,7 @@ def get_deck( a public snapshot) * passing any snapshot's ID will return that snapshot """ - source_deck: Deck = session.query(Deck).get(deck_id) + source_deck: Deck = session.get(Deck, deck_id) if not source_deck: raise NotFoundException(detail="Deck not found.") own_deck = ( @@ -472,15 +472,16 @@ def create_snapshot( **Please note:** you must save the deck prior to calling this endpoint! This endpoint will create a snapshot from the most recent saved copy of the deck (although it does allow you to set a custom title and description). 
""" - deck: Deck = ( - session.query(Deck) + stmt = ( + select(Deck) .options( - db.joinedload("cards"), - db.joinedload("dice"), - db.joinedload("selected_cards"), + db.joinedload(Deck.cards), + db.joinedload(Deck.dice), + db.joinedload(Deck.selected_cards), ) - .get(deck_id) + .where(Deck.id == deck_id) ) + deck: Deck = session.execute(stmt).unique().scalar_one_or_none() if not deck or deck.user_id != current_user.id: raise NoUserAccessException( detail="You cannot save a snapshot of a deck you do not own." @@ -583,7 +584,7 @@ def list_snapshots( do not own decks can only ever see public snapshots, so no private snapshots will be included even if they ask for them). """ - source_deck: Deck = session.query(Deck).get(deck_id) + source_deck: Deck = session.get(Deck, deck_id) if not source_deck or source_deck.is_deleted or source_deck.is_snapshot: raise NotFoundException(detail="Deck not found.") query = session.query(Deck).filter( @@ -631,7 +632,8 @@ def delete_deck( When requested for a snapshot, it's a soft deletion and the snapshot will no longer show up in any listings (including the stream). """ - deck: Deck = session.query(Deck).options(db.joinedload("source")).get(deck_id) + stmt = select(Deck).options(db.joinedload(Deck.source)).where(Deck.id == deck_id) + deck: Deck = session.execute(stmt).unique().scalar_one_or_none() if not deck or deck.user_id != current_user.id: raise NoUserAccessException(detail="You cannot delete a deck you do not own.") if deck.is_legacy: @@ -770,9 +772,9 @@ def clone_deck( deck = ( session.query(Deck) .options( - db.joinedload("cards"), - db.joinedload("dice"), - db.joinedload("selected_cards"), + db.joinedload(Deck.cards), + db.joinedload(Deck.dice), + db.joinedload(Deck.selected_cards), ) .filter(*valid_deck_filters) .first() @@ -850,7 +852,7 @@ def edit_snapshot( Title and description can be intentionally cleared by passing in an empty string for one or the other. 
""" # First look up the snapshot - deck: Deck = session.query(Deck).get(snapshot_id) + deck: Deck = session.get(Deck, snapshot_id) # Run basic validation to make sure they have access to this snapshot (and it is indeed a snapshot) if not deck: raise NotFoundException(detail="No such snapshot found.") @@ -973,9 +975,9 @@ def import_decks( x.created: x for x in session.query(Deck) .options( - db.joinedload("cards"), - db.joinedload("dice"), - db.joinedload("selected_cards"), + db.joinedload(Deck.cards), + db.joinedload(Deck.dice), + db.joinedload(Deck.selected_cards), ) .filter(Deck.created.in_(created_dates), Deck.user_id == current_user.id) .all() From ab4a6d52fcaec87f52d6e499bab2358efdadbd1b Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Tue, 26 Aug 2025 23:26:21 -0700 Subject: [PATCH 06/15] Fully converted codebase to use `session.execute()` rather than `session.query()` --- api/db.py | 4 +- api/depends.py | 12 +- api/services/card.py | 9 +- api/services/deck.py | 91 ++++---- api/services/releases.py | 12 +- api/services/stream.py | 27 +-- api/services/user.py | 15 +- api/tests/cards/test_card_create.py | 34 ++- api/tests/cards/test_card_patch.py | 37 +++- api/tests/cards/test_card_read.py | 13 +- api/tests/cards/test_cards.py | 5 +- api/tests/decks/deck_utils.py | 39 ++-- api/tests/decks/test_comments.py | 16 +- api/tests/decks/test_deck_clone.py | 65 +++++- api/tests/decks/test_deck_create.py | 75 ++++--- api/tests/decks/test_deck_delete.py | 17 +- api/tests/decks/test_deck_import.py | 58 +++-- api/tests/decks/test_subscriptions.py | 43 ++-- api/tests/test_auth.py | 39 +++- api/tests/test_health_check.py | 2 +- api/tests/test_players.py | 45 +++- api/tests/test_releases.py | 8 +- api/utils/pagination.py | 35 ++- api/views/auth.py | 15 +- api/views/cards.py | 172 +++++++-------- api/views/comments.py | 34 +-- api/views/decks.py | 302 +++++++++++++------------- api/views/health_check.py | 6 +- api/views/players.py | 17 +- api/views/releases.py | 40 ++-- 
api/views/subscriptions.py | 76 +++---- 31 files changed, 771 insertions(+), 592 deletions(-) diff --git a/api/db.py b/api/db.py index 86649be..cbdf767 100644 --- a/api/db.py +++ b/api/db.py @@ -177,8 +177,8 @@ class SomeModel(db.AlchemyBase): ) # Setup base engine and session class -engine = create_engine(settings.postgres_url, echo=settings.debug) -SessionLocal = sessionmaker(bind=engine) +engine = create_engine(settings.postgres_url, echo=settings.debug, future=True) +SessionLocal = sessionmaker(engine) # Setup our declarative base meta = MetaData( diff --git a/api/depends.py b/api/depends.py index 66b49fa..ee76689 100644 --- a/api/depends.py +++ b/api/depends.py @@ -3,6 +3,7 @@ from fastapi import Depends from fastapi.security import OAuth2PasswordBearer from jose import JWTError, jwt +from sqlalchemy import select from .db import Session, SessionLocal from .environment import settings @@ -58,12 +59,11 @@ def get_current_user( if user_badge is None or jwt_hex is None: raise CredentialsException() jwt_id = uuid.UUID(hex=jwt_hex) - current_user = session.query(User).filter(User.badge == user_badge).first() - revoked_session = ( - session.query(UserRevokedToken) - .filter(UserRevokedToken.revoked_uuid == jwt_id) - .first() - ) + stmt = select(User).where(User.badge == user_badge) + current_user = session.execute(stmt).scalar_one_or_none() + + stmt = select(UserRevokedToken).where(UserRevokedToken.revoked_uuid == jwt_id) + revoked_session = session.execute(stmt).scalar_one_or_none() if revoked_session or current_user is None: raise CredentialsException() if current_user.is_banned: diff --git a/api/services/card.py b/api/services/card.py index e3ce418..90754e7 100644 --- a/api/services/card.py +++ b/api/services/card.py @@ -1,5 +1,7 @@ import re +from sqlalchemy import select + from api import db from api.models.card import Card, CardConjuration from api.models.release import Release @@ -190,11 +192,10 @@ def create_card( text, ): 
conjuration_stubs.add(stubify(match.group(1))) - existing_conjurations = ( - session.query(Card.id, Card.stub, Card.name) - .filter(Card.stub.in_(conjuration_stubs), Card.is_legacy.is_(False)) - .all() + stmt = select(Card.id, Card.stub, Card.name).where( + Card.stub.in_(conjuration_stubs), Card.is_legacy.is_(False) ) + existing_conjurations = session.execute(stmt).all() existing_stubs = set(x.stub for x in existing_conjurations) missing_conjurations = conjuration_stubs.symmetric_difference(existing_stubs) if missing_conjurations: diff --git a/api/services/deck.py b/api/services/deck.py index cfa52b7..22b0ab0 100644 --- a/api/services/deck.py +++ b/api/services/deck.py @@ -2,6 +2,7 @@ from operator import itemgetter from sqlalchemy import select +from sqlalchemy.sql import Select from starlette.requests import Request from api import db @@ -90,16 +91,13 @@ def create_or_update_deck( deck.phoenixborn_id = phoenixborn.id deck.modified = now if deck.is_red_rains != is_red_rains: - if ( - session.query(Deck) - .filter( - Deck.source_id == deck_id, - Deck.is_snapshot.is_(True), - Deck.is_public.is_(True), - Deck.is_deleted.is_(False), - ) - .count() - ): + stmt = select(db.func.count(Deck.id)).where( + Deck.source_id == deck_id, + Deck.is_snapshot.is_(True), + Deck.is_public.is_(True), + Deck.is_deleted.is_(False), + ) + if session.execute(stmt).scalar(): raise RedRainsConversionFailed() deck.is_red_rains = is_red_rains else: @@ -143,16 +141,16 @@ def create_or_update_deck( if tutor_map: card_stubs.update(tutor_map.keys()) card_stubs.update(tutor_map.values()) - minimal_cards = ( - session.query(Card.id, Card.stub, Card.name, Card.card_type, Card.phoenixborn) + stmt = ( + select(Card.id, Card.stub, Card.name, Card.card_type, Card.phoenixborn) .join(Card.release) - .filter( + .where( Card.stub.in_(card_stubs), Card.is_legacy.is_(False), Release.is_public == True, ) - .all() ) + minimal_cards = session.execute(stmt).all() for card in minimal_cards: # Minimal cards could 
include bogus cards thanks to first_five list and similar, so fall # back to zero to ensure this is something with a count @@ -309,8 +307,7 @@ def create_snapshot_for_deck( return snapshot -def get_decks_query( - session: db.Session, +def get_decks_stmt( show_legacy=False, show_red_rains=False, is_public=False, @@ -321,17 +318,17 @@ def get_decks_query( cards: list[str] | None = None, players: list[str] | None = None, show_preconstructed=False, -) -> db.Query: - query = session.query(Deck).filter( +): + stmt = select(Deck).where( Deck.is_legacy.is_(show_legacy), Deck.is_deleted.is_(False), Deck.is_red_rains.is_(show_red_rains), ) if show_preconstructed: - query = query.filter(Deck.is_preconstructed.is_(True)) + stmt = stmt.where(Deck.is_preconstructed.is_(True)) if is_public: deck_comp = db.aliased(Deck) - query = query.outerjoin( + stmt = stmt.outerjoin( deck_comp, db.and_( Deck.source_id == deck_comp.source_id, @@ -343,40 +340,40 @@ def get_decks_query( db.and_(Deck.created == deck_comp.created, Deck.id < deck_comp.id), ), ), - ).filter( + ).where( deck_comp.id.is_(None), Deck.is_snapshot.is_(True), Deck.is_public.is_(True) ) else: - query = query.filter(Deck.is_snapshot.is_(False)) + stmt = stmt.where(Deck.is_snapshot.is_(False)) if q and q.strip(): - query = query.filter( + stmt = stmt.where( db.func.to_tsvector("english", db.cast(Deck.title, db.Text)).match( to_prefixed_tsquery(q) ) ) # Filter by Phoenixborn stubs (this is always an OR comparison between Phoenixborn) if phoenixborn: - query = query.join(Card, Card.id == Deck.phoenixborn_id).filter( + stmt = stmt.join(Card, Card.id == Deck.phoenixborn_id).where( Card.stub.in_(phoenixborn) ) # Filter by cards (this is always an OR comparison between cards) if cards: card_table = db.aliased(Card) - query = ( - query.join(DeckCard, DeckCard.deck_id == Deck.id) + stmt = ( + stmt.join(DeckCard, DeckCard.deck_id == Deck.id) .join(card_table, card_table.id == DeckCard.card_id) - .filter(card_table.stub.in_(cards)) + 
.where(card_table.stub.in_(cards)) ) # Filter by player badge, and always ensure that we eagerly load the user object if players: - query = ( - query.join(User, User.id == Deck.user_id) - .filter(User.badge.in_(players)) + stmt = ( + stmt.join(User, User.id == Deck.user_id) + .where(User.badge.in_(players)) .options(db.contains_eager(Deck.user)) ) else: - query = query.options(db.joinedload(Deck.user)) - return query.order_by(getattr(Deck.created, order)()) + stmt = stmt.options(db.joinedload(Deck.user)) + return stmt.order_by(getattr(Deck.created, order)()) def add_conjurations(card_id_to_conjuration_mapping, root_card_id, conjuration_set): @@ -401,12 +398,12 @@ def add_conjurations(card_id_to_conjuration_mapping, root_card_id, conjuration_s def get_conjuration_mapping(session: db.Session, card_ids: set[int]) -> dict: """Gathers top-level conjurations into a mapping keyed off the root card ID""" - conjuration_results = ( - session.query(Card, CardConjuration.card_id.label("root_card")) + stmt = ( + select(Card, CardConjuration.card_id.label("root_card")) .join(CardConjuration, Card.id == CardConjuration.conjuration_id) - .filter(CardConjuration.card_id.in_(card_ids)) - .all() + .where(CardConjuration.card_id.in_(card_ids)) ) + conjuration_results = session.execute(stmt).all() card_id_to_conjurations = defaultdict(list) for result in conjuration_results: card_id_to_conjurations[result.root_card].append(result.Card) @@ -509,7 +506,7 @@ def generate_deck_dict( def paginate_deck_listing( - query: db.Query, + stmt: Select, session: db.Session, request: Request, paging: PaginationOptions, @@ -518,7 +515,7 @@ def paginate_deck_listing( """Generates a paginated deck listing using as few queries as possible.""" # Gather our paginated results output = paginated_results_for_query( - query=query, paging=paging, url=str(request.url) + session=session, stmt=stmt, paging=paging, url=str(request.url) ) # Parse through the decks so that we can load their cards en masse with a 
single query deck_ids = set() @@ -528,12 +525,14 @@ def paginate_deck_listing( # Ensure we lookup our Phoenixborn cards needed_cards.add(deck_row.phoenixborn_id) # Fetch and collate our dice information for all decks - deck_dice = session.query(DeckDie).filter(DeckDie.deck_id.in_(deck_ids)).all() + deckdie_stmt = select(DeckDie).where(DeckDie.deck_id.in_(deck_ids)) + deck_dice = session.execute(deckdie_stmt).scalars().all() deck_id_to_dice = defaultdict(list) for deck_die in deck_dice: deck_id_to_dice[deck_die.deck_id].append(deck_die) # Now that we have all our basic deck information, look up the cards and quantities they include - deck_cards = session.query(DeckCard).filter(DeckCard.deck_id.in_(deck_ids)).all() + deckcard_stmt = select(DeckCard).where(DeckCard.deck_id.in_(deck_ids)) + deck_cards = session.execute(deckcard_stmt).scalars().all() deck_id_to_deck_cards = defaultdict(list) for deck_card in deck_cards: needed_cards.add(deck_card.card_id) @@ -543,7 +542,8 @@ def paginate_deck_listing( session=session, card_ids=needed_cards ) # Now that we have root-level conjurations, we can gather all our cards and setup our decks - cards = session.query(Card).filter(Card.id.in_(needed_cards)).all() + card_stmt = select(Card).where(Card.id.in_(needed_cards)) + cards = session.execute(card_stmt).scalars().all() card_id_to_card = {x.id: x for x in cards} deck_output = [] for deck in output["results"]: @@ -570,16 +570,19 @@ def deck_to_dict( """Converts a Deck object into an output dict using as few queries as possible.""" needed_cards = set() needed_cards.add(deck.phoenixborn_id) - deck_cards = session.query(DeckCard).filter(DeckCard.deck_id == deck.id).all() + stmt = select(DeckCard).where(DeckCard.deck_id == deck.id) + deck_cards = session.execute(stmt).scalars().all() for deck_card in deck_cards: needed_cards.add(deck_card.card_id) - deck_dice = session.query(DeckDie).filter(DeckDie.deck_id == deck.id).all() + stmt = select(DeckDie).where(DeckDie.deck_id == deck.id) + 
deck_dice = session.execute(stmt).scalars().all() # And finally we need to fetch all top-level conjurations card_id_to_conjurations = get_conjuration_mapping( session=session, card_ids=needed_cards ) # Now that we have root-level conjurations, we can gather all our cards and generate deck output - cards = session.query(Card).filter(Card.id.in_(needed_cards)).all() + stmt = select(Card).where(Card.id.in_(needed_cards)) + cards = session.execute(stmt).scalars().all() card_id_to_card = {x.id: x for x in cards} deck_dict = generate_deck_dict( deck=deck, diff --git a/api/services/releases.py b/api/services/releases.py index 4c169d4..46230f7 100644 --- a/api/services/releases.py +++ b/api/services/releases.py @@ -1,16 +1,18 @@ +from sqlalchemy import select + from api import db from api.models import Release, UserRelease, UserType -def get_releases_query(session: db.Session, current_user: UserType, show_legacy=False): +def get_releases_query(current_user: UserType, show_legacy=False): """Returns the query necessary to fetch a list of releases If a user is passed, then the releases will be tagged `is_mine` if in that user's collection. 
""" if current_user.is_anonymous(): - query = session.query(Release.name, Release.stub, Release.is_legacy) + stmt = select(Release.name, Release.stub, Release.is_legacy) else: - query = session.query( + stmt = select( Release.name, Release.stub, Release.is_legacy, @@ -25,8 +27,8 @@ def get_releases_query(session: db.Session, current_user: UserType, show_legacy= UserRelease.user_id == current_user.id, ), ) - query = query.filter( + stmt = stmt.where( Release.is_legacy.is_(show_legacy), Release.is_public.is_(True), ).order_by(Release.id.asc()) - return query + return stmt diff --git a/api/services/stream.py b/api/services/stream.py index 07aada8..83ac618 100644 --- a/api/services/stream.py +++ b/api/services/stream.py @@ -1,3 +1,5 @@ +from sqlalchemy import delete, select + from api import db from api.models.stream import Stream, Streamable, Subscription from api.utils.dates import utcnow @@ -20,16 +22,14 @@ def refresh_stream_for_entity( invoking method. """ if entity_type == "deck": - entity = ( - session.query(Stream) - .filter( - Stream.source_entity_id == source_entity_id, - Stream.entity_type == "deck", - ) - .first() + stmt = select(Stream).where( + Stream.source_entity_id == source_entity_id, + Stream.entity_type == "deck", ) + entity = session.execute(stmt).scalar_one_or_none() else: - entity = session.query(Stream).filter(Stream.entity_id == entity_id).first() + stmt = select(Stream).where(Stream.entity_id == entity_id) + entity = session.execute(stmt).scalar_one_or_none() # Comment edits do not update the stream, hence not handling them here if not entity: entity = Stream( @@ -57,14 +57,11 @@ def update_subscription_for_user( **Please note:** this method does not commit the changes! You must flush the session in the invoking method. 
""" - subscription = ( - session.query(Subscription) - .filter( - Subscription.user_id == user.id, - Subscription.source_entity_id == source_entity_id, - ) - .first() + stmt = select(Subscription).where( + Subscription.user_id == user.id, + Subscription.source_entity_id == source_entity_id, ) + subscription = session.execute(stmt).scalar_one_or_none() if not subscription: subscription = Subscription( user_id=user.id, diff --git a/api/services/user.py b/api/services/user.py index 25df959..7be8f38 100644 --- a/api/services/user.py +++ b/api/services/user.py @@ -3,6 +3,8 @@ from datetime import timedelta from random import choice +from sqlalchemy import select + from api import db, models from api.environment import settings from api.utils.auth import create_access_token, generate_password_hash @@ -22,9 +24,8 @@ def access_token_for_user(user: "models.User", is_long_term=False) -> str: def get_invite_for_email(session: "db.Session", email: str) -> "models.Invite": """Fetches or creates a new invite for the given email""" - invitation = ( - session.query(models.Invite).filter(models.Invite.email == email).first() - ) + stmt = select(models.Invite).where(models.Invite.email == email) + invitation = session.execute(stmt).scalar_one_or_none() if invitation: invitation.requests += 1 else: @@ -123,12 +124,8 @@ def generate_badges( _tries=_tries + 1, _current=_current, ) - taken = [ - badge - for (badge,) in session.query(models.User.badge) - .filter(models.User.badge.in_(options)) - .all() - ] + stmt = select(models.User.badge).where(models.User.badge.in_(options)) + taken = [badge for badge in session.execute(stmt).scalars().all()] if taken: options = [x for x in options if x not in taken] # Highly unlikely, but if all random badges are taken, try again diff --git a/api/tests/cards/test_card_create.py b/api/tests/cards/test_card_create.py index dd19897..fa92040 100644 --- a/api/tests/cards/test_card_create.py +++ b/api/tests/cards/test_card_create.py @@ -2,6 +2,7 @@ from 
fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models.card import Card, CardConjuration @@ -104,12 +105,13 @@ def test_create_card_placement_optional_phoenixborn( def test_create_card_implicit_release(client: TestClient, session: db.Session): """Creating a card implicitly creates an unpublished release""" # Verify that the number of releases is what we expect - release_query = ( - session.query(Release) - .filter(Release.is_legacy.is_(False)) - .order_by(Release.id.desc()) + release_stmt = ( + select(Release).where(Release.is_legacy.is_(False)).order_by(Release.id.desc()) ) - assert release_query.count() == 2 + count = session.execute( + select(db.func.count()).select_from(release_stmt.subquery()) + ).scalar() + assert count == 2 admin, token = create_admin_token(session) response = client.post( "/v2/cards", @@ -117,8 +119,11 @@ def test_create_card_implicit_release(client: TestClient, session: db.Session): headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED, response.json() - assert release_query.count() == 3 - release: Release = release_query.first() + new_count = session.execute( + select(db.func.count()).select_from(release_stmt.subquery()) + ).scalar() + assert new_count == 3 + release: Release = session.execute(release_stmt.limit(1)).scalar() assert release.name == MINIMUM_VALID_CARD["release"] assert release.is_public == False # And verify we don't end up with multiple releases on subsequent cards @@ -130,7 +135,10 @@ def test_create_card_implicit_release(client: TestClient, session: db.Session): headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED, response.json() - assert release_query.count() == 3 + final_count = session.execute( + select(db.func.count()).select_from(release_stmt.subquery()) + ).scalar() + assert final_count == 3 def test_create_card_missing_conjuration(client: 
TestClient, session: db.Session): @@ -151,7 +159,10 @@ def test_create_card_missing_conjuration(client: TestClient, session: db.Session def test_create_card_populates_conjurations(client: TestClient, session: db.Session): """Creating a card adds its conjuration relationships""" # Verify that the pre-existing number of conjurations is what we expect - assert session.query(CardConjuration).count() == 6 + count = session.execute( + select(db.func.count()).select_from(CardConjuration) + ).scalar() + assert count == 6 admin, token = create_admin_token(session) # Create the conjuration first conj_data = copy(MINIMUM_VALID_CARD) @@ -170,7 +181,10 @@ def test_create_card_populates_conjurations(client: TestClient, session: db.Sess ) assert card_response.status_code == status.HTTP_201_CREATED, card_response.json() # Then verify that the conjuration is linked to the card - assert session.query(CardConjuration).count() == 7 + count = session.execute( + select(db.func.count()).select_from(CardConjuration) + ).scalar() + assert count == 7 def test_create_card_conjuration_copies_required( diff --git a/api/tests/cards/test_card_patch.py b/api/tests/cards/test_card_patch.py index 2a43fdd..4855b29 100644 --- a/api/tests/cards/test_card_patch.py +++ b/api/tests/cards/test_card_patch.py @@ -1,5 +1,6 @@ from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Card @@ -32,7 +33,7 @@ def test_patch_card_errata(client: TestClient, session: db.Session): headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_200_OK - card = session.query(Card).filter(Card.stub == "wesley").first() + card = session.execute(select(Card).where(Card.stub == "wesley").limit(1)).scalar() assert card.version == 2 @@ -48,7 +49,7 @@ def test_patch_card_name(client: TestClient, session: db.Session): card_data = response.json() assert card_data["name"] == "Wesley", card_data assert card_data["stub"] 
== "wesley", card_data - card = session.query(Card).filter(Card.stub == "wesley").first() + card = session.execute(select(Card).where(Card.stub == "wesley").limit(1)).scalar() assert card.search_text.startswith("Wesley"), card.search_text @@ -61,7 +62,9 @@ def test_patch_card_search_keywords(client: TestClient, session: db.Session): headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_200_OK - card = session.query(Card).filter(Card.stub == "example-phoenixborn").first() + card = session.execute( + select(Card).where(Card.stub == "example-phoenixborn").limit(1) + ).scalar() assert card.search_text.startswith("Example Phoenixborn nonesuch"), card.search_text @@ -99,7 +102,9 @@ def test_patch_card_copies(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert response.json()["copies"] == 17 - card = session.query(Card).filter(Card.stub == "example-conjuration").first() + card = session.execute( + select(Card).where(Card.stub == "example-conjuration").limit(1) + ).scalar() assert card.copies == 17 @@ -113,7 +118,9 @@ def test_patch_card_copies_removal(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert "copies" not in response.json(), response.json() - card = session.query(Card).filter(Card.stub == "example-conjuration").first() + card = session.execute( + select(Card).where(Card.stub == "example-conjuration").limit(1) + ).scalar() assert card.copies is None @@ -126,7 +133,9 @@ def test_patch_card_cost(client: TestClient, session: db.Session): headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_200_OK - card = session.query(Card).filter(Card.stub == "example-ally").first() + card = session.execute( + select(Card).where(Card.stub == "example-ally").limit(1) + ).scalar() assert card.cost_weight == 106 card_data = response.json() assert card_data["cost"] == ["[[main]]", "1 [[natural:class]]"], card_data 
@@ -143,7 +152,9 @@ def test_patch_card_dice(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert response.json()["dice"] == ["illusion"] - card = session.query(Card).filter(Card.stub == "example-ally").first() + card = session.execute( + select(Card).where(Card.stub == "example-ally").limit(1) + ).scalar() assert card.dice_flags == 4 @@ -157,7 +168,9 @@ def test_patch_card_dice_removal(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert "dice" not in response.json(), response.json() - card = session.query(Card).filter(Card.stub == "example-ally").first() + card = session.execute( + select(Card).where(Card.stub == "example-ally").limit(1) + ).scalar() assert card.dice_flags == 0 @@ -171,7 +184,9 @@ def test_patch_card_alt_dice(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert response.json()["altDice"] == ["illusion"] - card = session.query(Card).filter(Card.stub == "example-ally").first() + card = session.execute( + select(Card).where(Card.stub == "example-ally").limit(1) + ).scalar() assert card.alt_dice_flags == 4 @@ -185,7 +200,9 @@ def test_patch_card_alt_dice_removal(client: TestClient, session: db.Session): ) assert response.status_code == status.HTTP_200_OK assert "altDice" not in response.json(), response.json() - card = session.query(Card).filter(Card.stub == "example-ally").first() + card = session.execute( + select(Card).where(Card.stub == "example-ally").limit(1) + ).scalar() assert card.alt_dice_flags == 0 diff --git a/api/tests/cards/test_card_read.py b/api/tests/cards/test_card_read.py index 474fe3f..12c5c47 100644 --- a/api/tests/cards/test_card_read.py +++ b/api/tests/cards/test_card_read.py @@ -1,5 +1,6 @@ from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Card, Comment, Release, Subscription @@ -55,7 +56,9 @@ 
def test_get_card_for_update_admin(client: TestClient, session: db.Session): def test_get_card_for_update_search_keywords(client: TestClient, session: db.Session): """Search keywords are rendered correctly to update output for admins""" - card = session.query(Card).filter(Card.stub == "example-phoenixborn").first() + card = session.execute( + select(Card).where(Card.stub == "example-phoenixborn").limit(1) + ).scalar() card.search_text = f"{card.name} nonesuch\n{card.json['text']}" session.commit() admin, token = create_admin_token(session) @@ -117,7 +120,9 @@ def test_get_details_phoenixborn(client: TestClient, session: db.Session): def test_get_details_phoenixborn_second_unique(client: TestClient, session: db.Session): """Must properly output second Phoenixborn unique in details""" # Get the master set release for creating our test cards - master_set = session.query(Release).filter(Release.stub == "master-set").first() + master_set = session.execute( + select(Release).where(Release.stub == "master-set").limit(1) + ).scalar() # Create a second conjuration for the second unique create_card( @@ -162,7 +167,9 @@ def test_get_details_phoenixborn_second_unique(client: TestClient, session: db.S def test_get_details_last_seen_entity_id(client: TestClient, session: db.Session): """Must properly output last seen entity ID for cards with comments and subscriptions""" user, token = create_user_token(session) - card = session.query(Card).filter(Card.is_legacy == False).first() + card = session.execute( + select(Card).where(Card.is_legacy == False).limit(1) + ).scalar() comment = Comment( entity_id=create_entity(session), user_id=user.id, diff --git a/api/tests/cards/test_cards.py b/api/tests/cards/test_cards.py index 239d05f..e3d4c77 100644 --- a/api/tests/cards/test_cards.py +++ b/api/tests/cards/test_cards.py @@ -1,5 +1,6 @@ from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models.release import 
Release, UserRelease @@ -222,7 +223,9 @@ def test_release_filtration(client: TestClient, session: db.Session): """Filtering cards by owned releases works properly.""" # Create our user, and setup their collection user, token = create_user_token(session) - master_set = session.query(Release).filter(Release.stub == "master-set").first() + master_set = session.execute( + select(Release).where(Release.stub == "master-set").limit(1) + ).scalar() user_release = UserRelease(user_id=user.id, release_id=master_set.id) session.add(user_release) session.commit() diff --git a/api/tests/decks/deck_utils.py b/api/tests/decks/deck_utils.py index 06d190f..65fa920 100644 --- a/api/tests/decks/deck_utils.py +++ b/api/tests/decks/deck_utils.py @@ -1,4 +1,4 @@ -from sqlalchemy import func +from sqlalchemy import func, select from api import db from api.models import Card, Deck, Release, User @@ -306,38 +306,35 @@ def get_phoenixborn_cards_dice( Returns (phoenixborn, cards, dice) """ - release: Release = ( - session.query(Release).filter(Release.stub == release_stub).first() - if release_stub - else None - ) - phoenixborn_query = session.query(Card).filter(Card.card_type == "Phoenixborn") + if release_stub: + stmt = select(Release).where(Release.stub == release_stub) + release: Release = session.execute(stmt).scalar_one_or_none() + else: + release = None + phoenixborn_stmt = select(Card).where(Card.card_type == "Phoenixborn") if release: - phoenixborn_query = phoenixborn_query.filter(Card.release_id == release.id) + phoenixborn_stmt = phoenixborn_stmt.where(Card.release_id == release.id) else: - phoenixborn_query = phoenixborn_query.order_by(func.random()) - phoenixborn: Card = phoenixborn_query.first() + phoenixborn_stmt = phoenixborn_stmt.order_by(func.random()) + phoenixborn: Card = session.execute(phoenixborn_stmt.limit(1)).scalar() if not phoenixborn: raise ValueError("No such test Phoenixborn!") - unique_card: Card = ( - session.query(Card) - .filter( - Card.phoenixborn == 
phoenixborn.name, - Card.card_type.notin_(CONJURATION_TYPES), - ) - .first() + unique_stmt = select(Card).where( + Card.phoenixborn == phoenixborn.name, + Card.card_type.notin_(CONJURATION_TYPES), ) - cards_query = session.query(Card).filter( + unique_card: Card = session.execute(unique_stmt.limit(1)).scalar() + cards_stmt = select(Card).where( Card.card_type.notin_( ("Conjuration", "Conjured Alteration Spell", "Phoenixborn") ), Card.phoenixborn.is_(None), ) if release: - cards_query = cards_query.filter(Card.release_id == release.id) + cards_stmt = cards_stmt.where(Card.release_id == release.id) else: - cards_query = cards_query.order_by(func.random()) - deck_cards: list[Card] = cards_query.limit(9).all() + cards_stmt = cards_stmt.order_by(func.random()) + deck_cards: list[Card] = session.execute(cards_stmt.limit(9)).scalars().all() card_dicts = [{"stub": x.stub, "count": 3} for x in deck_cards] card_dicts.append({"stub": unique_card.stub, "count": 3}) dice_dicts = [ diff --git a/api/tests/decks/test_comments.py b/api/tests/decks/test_comments.py index 7cd8c05..5adeb74 100644 --- a/api/tests/decks/test_comments.py +++ b/api/tests/decks/test_comments.py @@ -1,6 +1,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Card, Comment @@ -48,7 +49,8 @@ def test_get_comments(client: TestClient, session: db.Session, deck1, user1): session.add(deck_comment) session.commit() # Add the comments we do want to show up in the listing - card = session.query(Card).first() + stmt = select(Card).limit(1) + card = session.execute(stmt).scalar() comment1 = Comment( entity_id=create_entity(session), source_entity_id=card.entity_id, @@ -147,7 +149,8 @@ def test_create_comment(client: TestClient, session: db.Session, deck1, user1): json={"text": "My second comment"}, ) assert response.status_code == status.HTTP_201_CREATED - comment = 
session.query(Comment).order_by(Comment.created.desc()).first() + stmt = select(Comment).order_by(Comment.created.desc()).limit(1) + comment = session.execute(stmt).scalar() assert comment.ordering_increment == 1 @@ -173,13 +176,15 @@ def test_create_comment_previous_comments( json={"text": "My second comment"}, ) assert response.status_code == status.HTTP_201_CREATED - comment2 = session.query(Comment).order_by(Comment.created.desc()).first() + stmt = select(Comment).order_by(Comment.created.desc()).limit(1) + comment2 = session.execute(stmt).scalar() assert comment2.ordering_increment == 2 def test_create_comment_card(client: TestClient, session: db.Session, user1): """Verify creating a comment for a card works""" - card = session.query(Card).first() + stmt = select(Card).limit(1) + card = session.execute(stmt).scalar() _, token = create_user_token(session, user=user1) response = client.post( f"/v2/comments/{card.entity_id}", @@ -187,7 +192,8 @@ def test_create_comment_card(client: TestClient, session: db.Session, user1): json={"text": "My second comment"}, ) assert response.status_code == status.HTTP_201_CREATED - comment = session.query(Comment).order_by(Comment.created.desc()).first() + stmt = select(Comment).order_by(Comment.created.desc()).limit(1) + comment = session.execute(stmt).scalar() assert comment.source_version == card.version diff --git a/api/tests/decks/test_deck_clone.py b/api/tests/decks/test_deck_clone.py index 9812110..57405bf 100644 --- a/api/tests/decks/test_deck_clone.py +++ b/api/tests/decks/test_deck_clone.py @@ -1,6 +1,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Deck @@ -82,7 +83,14 @@ def test_clone_public_snapshot( ) assert response.status_code == status.HTTP_200_OK # One is the new deck object, and the other is the source ID snapshot - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 2 + assert ( + 
session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 2 + ) def test_clone_private_snapshot( @@ -91,28 +99,56 @@ def test_clone_private_snapshot( """Can clone own private snapshot""" user, token = user_token # Verify that we have three "decks" (original deck, private snapshot, public snapshot) - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 3 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 3 + ) response = client.get( f"/v2/decks/{snapshot.id}/clone", headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_200_OK # Check that we now have two more decks than before: new deck, and source snapshot - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 5 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 5 + ) def test_clone_deck(client: TestClient, session: db.Session, deck, user_token): """Can clone own deck""" user, token = user_token # Verify that we have three "decks" (original deck, private snapshot, public snapshot) - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 3 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 3 + ) response = client.get( f"/v2/decks/{deck.id}/clone", headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_200_OK # Check that we now have two more decks than before: new deck, and source snapshot - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 5 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 5 + ) def 
test_clone_private_shared_deck( @@ -127,7 +163,14 @@ def test_clone_private_shared_deck( ) assert response.status_code == status.HTTP_200_OK, response.json() # One is the new deck object, and the other is the source ID snapshot - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 2 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 2 + ) def test_clone_deck_red_rains( @@ -144,8 +187,12 @@ def test_clone_deck_red_rains( assert response.status_code == status.HTTP_200_OK # Verify that we have two decks (deck and snapshot) and both are marked as Red Rains decks assert ( - session.query(Deck) - .filter(Deck.user_id == user.id, Deck.is_red_rains.is_(True)) - .count() + session.execute( + select(db.func.count()).select_from( + select(Deck) + .where(Deck.user_id == user.id, Deck.is_red_rains.is_(True)) + .subquery() + ) + ).scalar() == 2 ) diff --git a/api/tests/decks/test_deck_create.py b/api/tests/decks/test_deck_create.py index ccc6b47..b321667 100644 --- a/api/tests/decks/test_deck_create.py +++ b/api/tests/decks/test_deck_create.py @@ -1,6 +1,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Card, Deck, DeckSelectedCard, Release @@ -141,14 +142,20 @@ def test_put_deck_bad_unique_in_deck( user, token = user_token valid_deck = _valid_deck_dict(session) # Add all PB uniques to the deck (easiest way to ensure we have the wrong one) - pb_uniques_query = session.query(Card.stub).filter( - Card.phoenixborn.isnot(None), - Card.card_type.notin_(CONJURATION_TYPES), + pb_uniques_query = ( + session.execute( + select(Card.stub).where( + Card.phoenixborn.isnot(None), + Card.card_type.notin_(CONJURATION_TYPES), + ) + ) + .scalars() + .all() ) - for unique in pb_uniques_query.all(): + for unique in pb_uniques_query: valid_deck["cards"].append( { - "stub": 
unique.stub, + "stub": unique, "count": 3, } ) @@ -164,12 +171,9 @@ def test_put_deck_conjuration_in_deck( """Must not allow saving a deck with conjurations in the card list""" user, token = user_token valid_deck = _valid_deck_dict(session) - conjuration_stub = ( - session.query(Card.stub) - .filter(Card.card_type.in_(CONJURATION_TYPES)) - .limit(1) - .scalar() - ) + conjuration_stub = session.execute( + select(Card.stub).where(Card.card_type.in_(CONJURATION_TYPES)).limit(1) + ).scalar() valid_deck["cards"].append( { "stub": conjuration_stub, @@ -246,17 +250,16 @@ def test_put_deck_first_five(client: TestClient, session: db.Session, user_token user, token = user_token valid_deck = _valid_deck_dict(session) valid_stubs = [x["stub"] for x in valid_deck["cards"]] - bad_stub = ( - session.query(Card.stub) - .filter( + bad_stub = session.execute( + select(Card.stub) + .where( Card.phoenixborn.is_(None), Card.card_type.notin_(CONJURATION_TYPES), Card.card_type != "Phoenixborn", Card.stub.notin_(valid_stubs), ) .limit(1) - .scalar() - ) + ).scalar() valid_deck["first_five"] = [valid_stubs[x] for x in range(0, 4)] valid_deck["first_five"].append(bad_stub) response = client.put( @@ -273,17 +276,16 @@ def test_put_deck_effect_costs(client: TestClient, session: db.Session, user_tok user, token = user_token valid_deck = _valid_deck_dict(session) valid_stubs = [x["stub"] for x in valid_deck["cards"]] - bad_stub = ( - session.query(Card.stub) - .filter( + bad_stub = session.execute( + select(Card.stub) + .where( Card.phoenixborn.is_(None), Card.card_type.notin_(CONJURATION_TYPES), Card.card_type != "Phoenixborn", Card.stub.notin_(valid_stubs), ) .limit(1) - .scalar() - ) + ).scalar() valid_deck["first_five"] = [valid_stubs[x] for x in range(0, 5)] valid_deck["effect_costs"] = [valid_stubs[x] for x in range(0, 4)] valid_deck["effect_costs"].append(bad_stub) @@ -301,17 +303,16 @@ def test_put_deck_tutor_map(client: TestClient, session: db.Session, user_token) user, token = 
user_token valid_deck = _valid_deck_dict(session) valid_stubs = [x["stub"] for x in valid_deck["cards"]] - bad_stub = ( - session.query(Card.stub) - .filter( + bad_stub = session.execute( + select(Card.stub) + .where( Card.phoenixborn.is_(None), Card.card_type.notin_(CONJURATION_TYPES), Card.card_type != "Phoenixborn", Card.stub.notin_(valid_stubs), ) .limit(1) - .scalar() - ) + ).scalar() valid_deck["tutor_map"] = { valid_stubs[0]: valid_stubs[1], bad_stub: valid_stubs[2], @@ -551,7 +552,9 @@ def test_post_snapshot_precon_already_exists(client: TestClient, session: db.Ses admin, token = create_user_token(session) admin.is_admin = True session.commit() - release_id = session.query(Release.id).filter(Release.stub == "expansion").scalar() + release_id = session.execute( + select(Release.id).where(Release.stub == "expansion").limit(1) + ).scalar() deck = create_deck_for_user(session, admin, release_stub="expansion") snapshot = create_snapshot_for_deck( session, admin, deck, is_public=True, preconstructed_release_id=release_id @@ -572,7 +575,7 @@ def test_post_snapshot(client: TestClient, session: db.Session, user_token, deck headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED - snapshot = session.query(Deck).order_by(Deck.id.desc()).first() + snapshot = session.execute(select(Deck).order_by(Deck.id.desc()).limit(1)).scalar() assert snapshot.title == deck.title assert snapshot.description == deck.description @@ -589,7 +592,7 @@ def test_post_snapshot_clear_description( headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED - snapshot = session.query(Deck).order_by(Deck.id.desc()).first() + snapshot = session.execute(select(Deck).order_by(Deck.id.desc()).limit(1)).scalar() assert snapshot.title == new_title assert snapshot.description is None @@ -606,7 +609,7 @@ def test_post_snapshot_new_description( headers={"Authorization": f"Bearer {token}"}, ) assert 
response.status_code == status.HTTP_201_CREATED - snapshot = session.query(Deck).order_by(Deck.id.desc()).first() + snapshot = session.execute(select(Deck).order_by(Deck.id.desc()).limit(1)).scalar() assert snapshot.description == new_description @@ -625,9 +628,9 @@ def test_post_snapshot_first_five(client: TestClient, session: db.Session, user_ headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED - snapshot = ( - session.query(Deck).order_by(Deck.id.desc(), Deck.is_snapshot.is_(True)).first() - ) + snapshot = session.execute( + select(Deck).where(Deck.is_snapshot == True).order_by(Deck.id.desc()).limit(1) + ).scalar() assert len(snapshot.selected_cards) == 1 @@ -649,7 +652,7 @@ def test_post_snapshot_no_first_five_public( headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code == status.HTTP_201_CREATED - snapshot = ( - session.query(Deck).order_by(Deck.id.desc(), Deck.is_snapshot.is_(True)).first() - ) + snapshot = session.execute( + select(Deck).where(Deck.is_snapshot == True).order_by(Deck.id.desc()).limit(1) + ).scalar() assert len(snapshot.selected_cards) == 0 diff --git a/api/tests/decks/test_deck_delete.py b/api/tests/decks/test_deck_delete.py index 3902d1b..9d3d734 100644 --- a/api/tests/decks/test_deck_delete.py +++ b/api/tests/decks/test_deck_delete.py @@ -1,6 +1,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Deck, Stream @@ -85,7 +86,9 @@ def test_delete_deck_no_snapshots( f"/v2/decks/{deck.id}", headers={"Authorization": f"Bearer {token}"} ) assert response.status_code == status.HTTP_204_NO_CONTENT - assert session.query(Deck).filter(Deck.id == old_id).first() is None + assert ( + session.execute(select(Deck).where(Deck.id == old_id).limit(1)).scalar() is None + ) def test_delete_public_snapshot( @@ -94,12 +97,12 @@ def test_delete_public_snapshot( """Must properly clean 
up stream entries when deleting a public snapshot""" user, token = user_token snapshot = create_snapshot_for_deck(session, user, deck, is_public=True) - assert session.query(Stream).count() == 1 + assert session.execute(select(db.func.count()).select_from(Stream)).scalar() == 1 response = client.delete( f"/v2/decks/{snapshot.id}", headers={"Authorization": f"Bearer {token}"} ) assert response.status_code == status.HTTP_204_NO_CONTENT - assert session.query(Stream).count() == 0 + assert session.execute(select(db.func.count()).select_from(Stream)).scalar() == 0 session.refresh(snapshot) assert snapshot.is_deleted == True @@ -111,12 +114,12 @@ def test_delete_latest_public_snapshot( user, token = user_token snapshot1 = create_snapshot_for_deck(session, user, deck, is_public=True) snapshot2 = create_snapshot_for_deck(session, user, deck, is_public=True) - assert session.query(Stream).count() == 1 + assert session.execute(select(db.func.count()).select_from(Stream)).scalar() == 1 response = client.delete( f"/v2/decks/{snapshot2.id}", headers={"Authorization": f"Bearer {token}"} ) assert response.status_code == status.HTTP_204_NO_CONTENT - stream_entry = session.query(Stream).first() + stream_entry = session.execute(select(Stream).limit(1)).scalar() assert stream_entry.entity_id == snapshot1.entity_id session.refresh(snapshot2) assert snapshot2.is_deleted == True @@ -128,12 +131,12 @@ def test_delete_root_deck(client: TestClient, session: db.Session, user_token): deck = create_deck_for_user(session, user) private_snapshot = create_snapshot_for_deck(session, user, deck) public_snapshot = create_snapshot_for_deck(session, user, deck, is_public=True) - assert session.query(Stream).count() == 1 + assert session.execute(select(db.func.count()).select_from(Stream)).scalar() == 1 response = client.delete( f"/v2/decks/{deck.id}", headers={"Authorization": f"Bearer {token}"} ) assert response.status_code == status.HTTP_204_NO_CONTENT - assert session.query(Stream).count() == 0 + 
assert session.execute(select(db.func.count()).select_from(Stream)).scalar() == 0 session.refresh(deck) session.refresh(private_snapshot) session.refresh(public_snapshot) diff --git a/api/tests/decks/test_deck_import.py b/api/tests/decks/test_deck_import.py index a35ca2d..08d9ee2 100644 --- a/api/tests/decks/test_deck_import.py +++ b/api/tests/decks/test_deck_import.py @@ -12,6 +12,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Deck @@ -213,7 +214,14 @@ def mock_httpx_post(*args, **kwargs): monkeypatch.setattr(httpx, "post", mock_httpx_post) # Verify no decks exist initially - assert session.query(Deck).filter(Deck.user_id == user.id).count() == 0 + assert ( + session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.user_id == user.id).subquery() + ) + ).scalar() + == 0 + ) response = client.get( f"/v2/decks/import/{export_token}", @@ -229,7 +237,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify decks were created in database - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 2 # Verify deck details @@ -285,7 +295,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify deck was updated, not duplicated - user_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + user_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(user_decks) == 1 updated_deck = user_decks[0] @@ -342,7 +354,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify both decks were created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == 
user.id)).scalars().all() + ) assert len(created_decks) == 2 # Find source and snapshot @@ -413,7 +427,9 @@ def mock_httpx_post(*args, **kwargs): assert data["success_count"] == 1 # Verify deck was created with selected cards data - created_deck = session.query(Deck).filter(Deck.user_id == user.id).first() + created_deck = session.execute( + select(Deck).where(Deck.user_id == user.id).limit(1) + ).scalar() assert created_deck is not None # Check selected cards relationships @@ -481,7 +497,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify both the deck and its snapshot were imported - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 2 source_deck = next((d for d in created_decks if not d.is_snapshot), None) @@ -569,7 +587,9 @@ def mock_httpx_post(*args, **kwargs): assert data["next_page_from_date"] is not None # Should have next page info # Verify only first deck was imported (pagination handled by front-end) - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 1 deck_titles = [d.title for d in created_decks] @@ -642,7 +662,9 @@ def mock_httpx_post(*args, **kwargs): assert "missing cards" in data["errors"][0].lower() # Verify no decks were created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 0 @@ -696,7 +718,9 @@ def mock_httpx_post(*args, **kwargs): assert "missing phoenixborn" in data["errors"][0].lower() # Verify no decks were created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + 
session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 0 @@ -847,7 +871,9 @@ def mock_httpx_post(*args, **kwargs): assert "missing phoenixborn" in data["errors"][0].lower() # Verify only the valid deck was created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 1 assert created_decks[0].title == "Valid Deck" @@ -1150,7 +1176,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify deck was created with capped dice counts - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 1 created_deck = created_decks[0] @@ -1196,7 +1224,9 @@ def mock_httpx_post(*args, **kwargs): assert data["next_page_from_date"] is None # Verify no decks were created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 0 @@ -1262,7 +1292,9 @@ def mock_httpx_post(*args, **kwargs): assert len(data["errors"]) == 0 # Verify all decks were created - created_decks = session.query(Deck).filter(Deck.user_id == user.id).all() + created_decks = ( + session.execute(select(Deck).where(Deck.user_id == user.id)).scalars().all() + ) assert len(created_decks) == 3 # Find the decks by type diff --git a/api/tests/decks/test_subscriptions.py b/api/tests/decks/test_subscriptions.py index d9b5444..ade6e58 100644 --- a/api/tests/decks/test_subscriptions.py +++ b/api/tests/decks/test_subscriptions.py @@ -1,6 +1,7 @@ import pytest from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Card, 
Comment, Subscription @@ -24,7 +25,7 @@ def deck1(decks_session, user1): @pytest.fixture def subscription(session, user1): - card = session.query(Card).order_by(Card.id.desc()).first() + card = session.execute(select(Card).order_by(Card.id.desc()).limit(1)).scalar() sub = Subscription( user_id=user1.id, source_entity_id=card.entity_id, @@ -36,7 +37,7 @@ def subscription(session, user1): def test_create_subscription(client: TestClient, session: db.Session, user1): """Verify that creating a card subscription works properly""" - card = session.query(Card).first() + card = session.execute(select(Card).limit(1)).scalar() _, token = create_user_token(session, user=user1) response = client.post( f"/v2/subscription/{card.entity_id}", @@ -108,7 +109,7 @@ def test_create_subscription_existing_subscription( client: TestClient, session: db.Session, user1 ): """Verify creating a subscription that already exists returns truthy""" - card = session.query(Card).first() + card = session.execute(select(Card).limit(1)).scalar() sub = Subscription( user_id=user1.id, source_entity_id=card.entity_id, @@ -127,7 +128,7 @@ def test_create_subscription_last_entity_id( client: TestClient, session: db.Session, user1 ): """Verify subscriptions populate the last seen entity ID properly""" - card = session.query(Card).first() + card = session.execute(select(Card).limit(1)).scalar() # Add a pre-existing comment comment = Comment( entity_id=create_entity(session), @@ -148,14 +149,14 @@ def test_create_subscription_last_entity_id( ) assert response.status_code == status.HTTP_201_CREATED # Verify the last_seen_entity_id matches our previous comment - subscription = ( - session.query(Subscription) - .filter( + subscription = session.execute( + select(Subscription) + .where( Subscription.source_entity_id == card.entity_id, Subscription.user_id == user1.id, ) - .first() - ) + .limit(1) + ).scalar() assert subscription.last_seen_entity_id == comment.entity_id @@ -178,14 +179,14 @@ def 
test_create_subscription_last_entity_id_snapshot( ) assert response.status_code == status.HTTP_201_CREATED # Check the subscription last_seen_entity_id - subscription = ( - session.query(Subscription) - .filter( + subscription = session.execute( + select(Subscription) + .where( Subscription.source_entity_id == deck1.entity_id, Subscription.user_id == user1.id, ) - .first() - ) + .limit(1) + ).scalar() assert subscription.last_seen_entity_id == snapshot.entity_id @@ -208,12 +209,14 @@ def test_delete_subscription( ) assert response.status_code == status.HTTP_204_NO_CONTENT assert ( - session.query(Subscription) - .filter( - Subscription.source_entity_id == source_entity_id, - Subscription.user_id == user1.id, - ) - .first() + session.execute( + select(Subscription) + .where( + Subscription.source_entity_id == source_entity_id, + Subscription.user_id == user1.id, + ) + .limit(1) + ).scalar() is None ) diff --git a/api/tests/test_auth.py b/api/tests/test_auth.py index ce15974..fa43508 100644 --- a/api/tests/test_auth.py +++ b/api/tests/test_auth.py @@ -5,6 +5,7 @@ from fastapi.testclient import TestClient from freezegun import freeze_time from jose import jwt +from sqlalchemy import select import api.views.players from api import db @@ -87,7 +88,12 @@ def _always_true(*args, **kwargs): fake_email = utils.generate_random_email() response = client.post("/v2/players/new", json={"email": fake_email}) assert response.status_code == status.HTTP_201_CREATED, response.json() - assert session.query(Invite).filter(Invite.email == fake_email).count() == 1 + count = session.execute( + select(db.func.count()).select_from( + select(Invite).where(Invite.email == fake_email).subquery() + ) + ).scalar() + assert count == 1 def test_anonymous_required_authenticated_user(client: TestClient, session: db.Session): @@ -100,7 +106,12 @@ def test_anonymous_required_authenticated_user(client: TestClient, session: db.S headers={"Authorization": f"Bearer {token}"}, ) assert response.status_code 
== status.HTTP_401_UNAUTHORIZED, response.json() - assert session.query(Invite).filter(Invite.email == fake_email).count() == 0 + count = session.execute( + select(db.func.count()).select_from( + select(Invite).where(Invite.email == fake_email).subquery() + ) + ).scalar() + assert count == 0 def test_login_required(client: TestClient, session: db.Session): @@ -349,12 +360,14 @@ def test_revoke_token(client: TestClient, session: db.Session): # Verify that we added the token to the "revert token" table payload = jwt.decode(token, settings.secret_key, algorithms=["HS256"]) jwt_uuid = uuid.UUID(hex=payload["jti"]) - assert ( - session.query(UserRevokedToken) - .filter(UserRevokedToken.revoked_uuid == jwt_uuid) - .count() - == 1 - ) + count = session.execute( + select(db.func.count()).select_from( + select(UserRevokedToken) + .where(UserRevokedToken.revoked_uuid == jwt_uuid) + .subquery() + ) + ).scalar() + assert count == 1 # Verify that we cannot make further authenticated requests with this token response = client.get( "/v2/players/me", headers={"Authorization": f"Bearer {token}"} @@ -377,7 +390,13 @@ def revoke_token(time): # Revoke a token 2 days ago one_day = now - timedelta(days=2) revoke_token(one_day) - assert session.query(UserRevokedToken).count() == 1 + count = session.execute( + select(db.func.count()).select_from(UserRevokedToken) + ).scalar() + assert count == 1 # Revoke a token now, so that the first token should get purged revoke_token(now) - assert session.query(UserRevokedToken).count() == 1 + count = session.execute( + select(db.func.count()).select_from(UserRevokedToken) + ).scalar() + assert count == 1 diff --git a/api/tests/test_health_check.py b/api/tests/test_health_check.py index 33379a5..481e56d 100644 --- a/api/tests/test_health_check.py +++ b/api/tests/test_health_check.py @@ -17,6 +17,6 @@ def _raise_postgres_error(*args, **kwargs): """Fakes a Postgres connection failure""" raise TimeoutError() - monkeypatch.setattr(sqlalchemy.orm.Session, 
"query", _raise_postgres_error) + monkeypatch.setattr(sqlalchemy.orm.Session, "execute", _raise_postgres_error) response = client.get("/health-check") assert response.status_code == status.HTTP_503_SERVICE_UNAVAILABLE, response.json() diff --git a/api/tests/test_players.py b/api/tests/test_players.py index effd735..3192be1 100644 --- a/api/tests/test_players.py +++ b/api/tests/test_players.py @@ -2,6 +2,7 @@ from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select import api.views.players from api import db @@ -28,13 +29,19 @@ def _always_true(*args, **kwargs): response = client.post("/v2/players/new", json={"email": fake_email}) assert response.status_code == status.HTTP_201_CREATED, response.json() assert ( - session.query(Invite.requests).filter(Invite.email == fake_email).scalar() == 1 + session.execute( + select(Invite.requests).where(Invite.email == fake_email) + ).scalar() + == 1 ) # Request a second time response = client.post("/v2/players/new", json={"email": fake_email}) assert response.status_code == status.HTTP_201_CREATED, response.json() assert ( - session.query(Invite.requests).filter(Invite.email == fake_email).scalar() == 2 + session.execute( + select(Invite.requests).where(Invite.email == fake_email) + ).scalar() + == 2 ) @@ -43,7 +50,12 @@ def test_invite_existing_user(client: TestClient, session: db.Session): user, _ = utils.create_user_token(session) response = client.post("/v2/players/new", json={"email": user.email}) assert response.status_code == status.HTTP_400_BAD_REQUEST, respone.json() - assert session.query(Invite).filter(Invite.email == user.email).count() == 0 + count = session.execute( + select(db.func.count()).select_from( + select(Invite).where(Invite.email == user.email).subquery() + ) + ).scalar() + assert count == 0 def test_invite_smtp_failure(client: TestClient, session: db.Session, monkeypatch): @@ -58,7 +70,12 @@ def _always_false(*args, **kwargs): response = 
client.post("/v2/players/new", json={"email": fake_email}) assert response.status_code == status.HTTP_400_BAD_REQUEST, response.json() # Email failed, but the invite should still be created - assert session.query(Invite).filter(Invite.email == fake_email).count() == 1 + count = session.execute( + select(db.func.count()).select_from( + select(Invite).where(Invite.email == fake_email).subquery() + ) + ).scalar() + assert count == 1 def test_register_user_different_passwords(client: TestClient, session: db.Session): @@ -72,7 +89,8 @@ def test_register_user_different_passwords(client: TestClient, session: db.Sessi json={"password": password, "password_confirm": password_confirm}, ) assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, response.json() - assert session.query(User).count() == 0 + count = session.execute(select(db.func.count(User.id))).scalar() + assert count == 0 def test_register_user_invalid_token(client: TestClient, session: db.Session): @@ -84,7 +102,8 @@ def test_register_user_invalid_token(client: TestClient, session: db.Session): json={"username": "test", "password": password, "password_confirm": password}, ) assert response.status_code == status.HTTP_404_NOT_FOUND, response.json() - assert session.query(User).count() == 0 + count = session.execute(select(db.func.count(User.id))).scalar() + assert count == 0 def test_register_user(client: TestClient, session: db.Session): @@ -97,8 +116,18 @@ def test_register_user(client: TestClient, session: db.Session): json={"username": "test", "password": password, "password_confirm": password}, ) assert response.status_code == status.HTTP_201_CREATED, response.json() - assert session.query(Invite).filter(Invite.email == email).count() == 0 - assert session.query(User).filter(User.email == email).count() == 1 + count = session.execute( + select(db.func.count()).select_from( + select(Invite).where(Invite.email == email).subquery() + ) + ).scalar() + assert count == 0 + count = session.execute( + 
select(db.func.count()).select_from( + select(User).where(User.email == email).subquery() + ) + ).scalar() + assert count == 1 # `/v2/players/me` is tested by the default auth dependency checks in `test_auth.py` diff --git a/api/tests/test_releases.py b/api/tests/test_releases.py index afaed0e..65d0ea3 100644 --- a/api/tests/test_releases.py +++ b/api/tests/test_releases.py @@ -1,5 +1,6 @@ from fastapi import status from fastapi.testclient import TestClient +from sqlalchemy import select from api import db from api.models import Release, UserRelease @@ -87,7 +88,12 @@ def test_put_releases(client: TestClient, session: db.Session): session.commit() user, token = create_user_token(session) assert ( - session.query(UserRelease).filter(UserRelease.user_id == user.id).count() == 0 + session.execute( + select(db.func.count()).select_from( + select(UserRelease).where(UserRelease.user_id == user.id).subquery() + ) + ).scalar() + == 0 ) response = client.put( "/v2/releases/mine", diff --git a/api/utils/pagination.py b/api/utils/pagination.py index 22f0986..8a97584 100644 --- a/api/utils/pagination.py +++ b/api/utils/pagination.py @@ -2,7 +2,9 @@ import urllib.parse from typing import Any +from sqlalchemy import select from sqlalchemy.orm import Query +from sqlalchemy.sql import Select from api import db from api.environment import settings @@ -27,14 +29,32 @@ def replace_offset(url: str, offset: int) -> str: def paginated_results_for_query( - query: Query, + session: db.Session, + stmt: Select, paging: PaginationOptions, url: str, ) -> dict: """Generic pagination results output""" # Fetch count and actual query data - total_rows = query.count() - rows = query.limit(paging.limit).offset(paging.offset).all() + count_stmt = select(db.func.count()).select_from(stmt.subquery()) + total_rows = session.execute(count_stmt).scalar() + stmt = stmt.limit(paging.limit).offset(paging.offset) + # Check if this is a single column select of scalars vs ORM objects + is_orm_query = ( + 
len(stmt.column_descriptions) == 1 + and inspect.isclass(stmt.column_descriptions[0]["type"]) + and issubclass(stmt.column_descriptions[0]["type"], db.AlchemyBase) + ) + + if is_orm_query: + rows = session.execute(stmt).scalars().all() + row_list = rows + else: + rows = session.execute(stmt).all() + if len(stmt.column_descriptions) == 1: + row_list = [x[0] for x in rows] + else: + row_list = rows # Construct our next and previous links previous_url = None @@ -48,15 +68,6 @@ def paginated_results_for_query( next_offset = paging.offset + paging.limit if next_offset < total_rows: next_url = replace_offset(url, next_offset) - - # Construct our result rows and return - if len(query.column_descriptions) == 1 and ( - not inspect.isclass(query.column_descriptions[0]["type"]) - or not issubclass(query.column_descriptions[0]["type"], db.AlchemyBase) - ): - row_list = [x[0] for x in rows] - else: - row_list = rows return { "count": total_rows, "next": next_url, diff --git a/api/views/auth.py b/api/views/auth.py index df4d0c9..ba03e57 100644 --- a/api/views/auth.py +++ b/api/views/auth.py @@ -5,6 +5,7 @@ from fastapi import APIRouter, Depends from fastapi.security import OAuth2PasswordRequestForm from pydantic import UUID4 +from sqlalchemy import delete, select from api import db from api.depends import ( @@ -54,7 +55,8 @@ def log_in( (defaults to one year before expiring). """ email = form_data.username.lower() - user = session.query(User).filter(User.email == email).first() + stmt = select(User).where(User.email == email) + user = session.execute(stmt).scalar_one_or_none() if not user or not verify_password(form_data.password, user.password): raise CredentialsException( detail="Incorrect username or password", @@ -82,9 +84,10 @@ def log_out( long-lived. 
""" # Do some quick clean-up to keep our table lean and mean; deletes any tokens that expired more than 24 hours ago - session.query(UserRevokedToken).filter( + delete_stmt = delete(UserRevokedToken).where( UserRevokedToken.expires < utcnow() - dt.timedelta(days=1) - ).delete(synchronize_session=False) + ) + session.execute(delete_stmt) session.commit() # Then add our newly revoked token expires_at = dt.datetime.fromtimestamp(jwt_payload["exp"], tz=dt.timezone.utc) @@ -114,7 +117,8 @@ def request_password_reset( ): """Request a reset password link for the given email.""" email = data.email.lower() - user: User = session.query(User).filter(User.email == email).first() + stmt = select(User).where(User.email == email) + user: User = session.execute(stmt).scalar_one_or_none() if not user: raise NotFoundException(detail="No account found for email.") if user.is_banned: @@ -150,7 +154,8 @@ def reset_password( _=Depends(anonymous_required), ): """Reset the password for account associated with the given reset token.""" - user = session.query(User).filter(User.reset_uuid == token).first() + stmt = select(User).where(User.reset_uuid == token) + user = session.execute(stmt).scalar_one_or_none() if user is None: raise NotFoundException( detail="Token not found. Please request a new password reset." 
diff --git a/api/views/cards.py b/api/views/cards.py index c19db73..754e2e4 100644 --- a/api/views/cards.py +++ b/api/views/cards.py @@ -1,6 +1,7 @@ from copy import deepcopy from fastapi import APIRouter, Depends, Query, Request, status +from sqlalchemy import select from sqlalchemy.exc import IntegrityError from api import db @@ -92,18 +93,16 @@ def list_cards( * `include_uniques_for`: if set to a Phoenixborn name, listing will also include uniques belonging to the given Phoenixborn (only applicable to deckbuilder mode) """ - # First build our base query - query = ( - session.query(Card.json).join(Card.release).filter(Release.is_public.is_(True)) - ) + # First build our base statement + stmt = select(Card.json).join(Card.release).where(Release.is_public.is_(True)) # Only include legacy cards, if we're in legacy mode if show_legacy: - query = query.filter(Card.is_legacy.is_(True)) + stmt = stmt.where(Card.is_legacy.is_(True)) else: - query = query.filter(Card.is_legacy.is_(False)) + stmt = stmt.where(Card.is_legacy.is_(False)) # Add a search term, if we're using one if q and q.strip(): - query = query.filter( + stmt = stmt.where( db.func.to_tsvector("english", Card.search_text).match( to_prefixed_tsquery(q) ) @@ -117,28 +116,28 @@ def list_cards( card_types.add("Conjured Alteration Spell") else: card_types.add(card_type.replace("_", " ").title()) - query = query.filter(Card.card_type.in_(card_types)) + stmt = stmt.where(Card.card_type.in_(card_types)) # Exclude some types if we're in deckbuilder mode if mode is CardsFilterListingMode.deckbuilder: - query = query.filter( + stmt = stmt.where( Card.card_type.notin_( ("Phoenixborn", "Conjuration", "Conjured Alteration Spell") ) ) # Check if we're filtering by "Summon" cards if show_summons: - query = query.filter(Card.is_summon_spell.is_(True)) + stmt = stmt.where(Card.is_summon_spell.is_(True)) # Filter by releases, if requested if releases or r: if show_legacy and releases is CardsFilterRelease.phg: - query = 
query.filter(Release.is_phg.is_(True)) + stmt = stmt.where(Release.is_phg.is_(True)) elif releases is CardsFilterRelease.mine and not current_user.is_anonymous(): - my_release_subquery = session.query(UserRelease.release_id).filter( + my_release_subquery = select(UserRelease.release_id).where( UserRelease.user_id == current_user.id ) - query = query.filter(Card.release_id.in_(my_release_subquery)) + stmt = stmt.where(Card.release_id.in_(my_release_subquery)) elif r: - query = query.filter(Release.stub.in_(r)) + stmt = stmt.where(Release.stub.in_(r)) # Filter against required dice costs if dice: dice_set = set(dice) @@ -223,26 +222,26 @@ def list_cards( # It's possible, though unlikely, to not have filters here if they passed a bad dice listing # (e.g. passed "basic" for the dice color and "includes" for the logic) if dice_filters: - query = query.filter(db.or_(*dice_filters)) + stmt = stmt.where(db.or_(*dice_filters)) # Only Include Phoenixborn uniques for the given Phoenixborn (or no Phoenixborn, in deckbuilder) if include_uniques_for: - query = query.filter( + stmt = stmt.where( db.or_( Card.phoenixborn.is_(None), Card.phoenixborn == include_uniques_for, ) ) elif mode is CardsFilterListingMode.deckbuilder: - query = query.filter( + stmt = stmt.where( Card.phoenixborn.is_(None), ) if sort == CardsSortingMode.type_: # This uses a similar ordering to how the front-end organizes cards in deck listings - query = query.order_by( + stmt = stmt.order_by( getattr(Card.type_weight, order)(), getattr(Card.name, order)() ) elif sort == CardsSortingMode.cost: - query = query.order_by( + stmt = stmt.order_by( getattr(Card.cost_weight, order)(), getattr(Card.name, order)() ) elif sort == CardsSortingMode.dice: @@ -251,7 +250,7 @@ def list_cards( # then by their relative cost, and finally falling back on name. 
The latter two are simple, # but to order by dice types we need to first bitwise OR dice and alt_dice (so we get a # number representing all possible dice types you could spend), then order by that - query = query.order_by( + stmt = stmt.order_by( getattr(Card.dice_weight, order)(), getattr(Card.cost_weight, order)(), getattr(Card.name, order)(), @@ -262,16 +261,17 @@ def list_cards( # those cards out by preconstructed deck, because there's not an easy join strategy to # fetch that data; I'd have to denormalize it into the cards. Will consider if people # request it) - query = query.order_by( + stmt = stmt.order_by( getattr(Release.id, order)(), getattr(Card.type_weight, order)(), getattr(Card.name, order)(), ) else: # Defaults to ordering by name - query = query.order_by(getattr(Card.name, order)()) + stmt = stmt.order_by(getattr(Card.name, order)()) return paginated_results_for_query( - query=query, + session=session, + stmt=stmt, paging=paging, url=str(request.url), ) @@ -298,22 +298,23 @@ def get_card_fuzzy_lookup( # Make sure we have a search term if not q or not q.strip(): raise APIException(detail="Query string is required.") - query = session.query(Card).join(Card.release).filter(Release.is_public.is_(True)) + stmt = select(Card).join(Card.release).where(Release.is_public.is_(True)) if show_legacy: - query = query.filter(Card.is_legacy.is_(True)) + stmt = stmt.where(Card.is_legacy.is_(True)) else: - query = query.filter(Card.is_legacy.is_(False)) + stmt = stmt.where(Card.is_legacy.is_(False)) stub_search = stubify(q) search_vector = db.func.to_tsvector("english", Card.search_text) prefixed_query = to_prefixed_tsquery(q) - query = query.filter( + stmt = stmt.where( db.or_( search_vector.match(prefixed_query), Card.stub.like(f"%{stub_search}%"), ) ) # Order by search ranking - possible_cards = query.order_by(Card.name.asc()).all() + stmt = stmt.order_by(Card.name.asc()) + possible_cards = session.execute(stmt).scalars().all() if not possible_cards: raise 
NotFoundException(detail="No matching cards found.") ranks_with_matches = [] @@ -365,14 +366,14 @@ def get_card( `for_update=true` will only work for admins. """ - query = session.query(Card).filter(Card.stub == stub) + stmt = select(Card).where(Card.stub == stub) if show_legacy: - query = query.filter(Card.is_legacy.is_(True)) + stmt = stmt.where(Card.is_legacy.is_(True)) else: - query = query.filter(Card.is_legacy.is_(False)) + stmt = stmt.where(Card.is_legacy.is_(False)) if user.is_anonymous() or not user.is_admin: - query = query.join(Card.release).filter(Release.is_public == True) - card = query.scalar() + stmt = stmt.join(Card.release).where(Release.is_public == True) + card = session.execute(stmt).scalar() if not card: raise NotFoundException(detail="Card not found.") card_json = deepcopy(card.json) @@ -426,8 +427,8 @@ def update_card( need to modify the database directly (and remember that the stub is stored in two places! The database column, and within the card JSON). """ - query = session.query(Card).filter(Card.stub == stub, Card.is_legacy.is_(False)) - card = query.scalar() + stmt = select(Card).where(Card.stub == stub, Card.is_legacy.is_(False)) + card = session.execute(stmt).scalar() if not card: raise NotFoundException(detail="Card not found.") if data.is_errata: @@ -526,17 +527,17 @@ def get_card_details( current_user: "UserType" = Depends(get_current_user), ): """Returns the full details about the card for use on the card details page""" - card = ( - session.query(Card) + stmt = ( + select(Card) .join(Card.release) .options(db.contains_eager(Card.release)) - .filter( + .where( Card.stub == stub, Card.is_legacy.is_(show_legacy), Release.is_public == True, ) - .scalar() ) + card = session.execute(stmt).scalar() if not card: raise NotFoundException(detail="Card not found.") @@ -550,28 +551,25 @@ def get_card_details( if card.phoenixborn or card.card_type == "Phoenixborn": # Grab all cards related to this Phoenixborn if card.phoenixborn: - phoenixborn 
= ( - session.query(Card) - .filter( - Card.name == card.phoenixborn, - Card.card_type == "Phoenixborn", - Card.is_legacy.is_(show_legacy), - ) - .first() + stmt = select(Card).where( + Card.name == card.phoenixborn, + Card.card_type == "Phoenixborn", + Card.is_legacy.is_(show_legacy), ) + phoenixborn = session.execute(stmt).scalar_one_or_none() else: phoenixborn = card phoenixborn_conjurations = gather_conjurations(phoenixborn) - phoenixborn_uniques = ( - session.query(Card) - .filter( + stmt = ( + select(Card) + .where( Card.phoenixborn == phoenixborn.name, Card.card_type.notin_(("Conjuration", "Conjured Alteration Spell")), Card.is_legacy.is_(show_legacy), ) .order_by(Card.id.asc()) - .all() ) + phoenixborn_uniques = session.execute(stmt).scalars().all() related_cards["phoenixborn"] = _card_to_minimal_card(phoenixborn) if phoenixborn_conjurations: related_cards["phoenixborn_conjurations"] = [ @@ -639,30 +637,29 @@ def get_card_details( root_card_ids = [card.id] # We only look up the Phoenixborn if it's in our root summons array (otherwise we might be # looking at a Phoenixborn unique, and we'll get accurate counts for it in the next query) - phoenixborn_counts = ( - session.query( + if phoenixborn and phoenixborn.id in root_card_ids: + stmt = select( db.func.count(Deck.id).label("decks"), db.func.count(db.func.distinct(Deck.user_id)).label("users"), + ).where(Deck.phoenixborn_id == phoenixborn.id, Deck.is_snapshot.is_(False)) + phoenixborn_counts = session.execute(stmt).first() + else: + phoenixborn_counts = None + if root_card_ids: + stmt = ( + select( + db.func.count(DeckCard.deck_id).label("decks"), + db.func.count(db.func.distinct(Deck.user_id)).label("users"), + ) + .join(Deck, Deck.id == DeckCard.deck_id) + .where( + DeckCard.card_id.in_(root_card_ids), + Deck.is_snapshot.is_(False), + ) ) - .filter(Deck.phoenixborn_id == phoenixborn.id, Deck.is_snapshot.is_(False)) - .first() - if phoenixborn and phoenixborn.id in root_card_ids - else None - ) - 
card_counts = ( - session.query( - db.func.count(DeckCard.deck_id).label("decks"), - db.func.count(db.func.distinct(Deck.user_id)).label("users"), - ) - .join(Deck, Deck.id == DeckCard.deck_id) - .filter( - DeckCard.card_id.in_(root_card_ids), - Deck.is_snapshot.is_(False), - ) - .first() - if root_card_ids - else None - ) + card_counts = session.execute(stmt).first() + else: + card_counts = None counts = {"decks": 0, "users": 0} if phoenixborn_counts: counts["decks"] += phoenixborn_counts.decks @@ -672,30 +669,27 @@ def get_card_details( counts["users"] += card_counts.users # Grab preconstructed deck, if available - preconstructed = ( - session.query(Deck.source_id, Deck.title) + stmt = ( + select(Deck.source_id, Deck.title) .join(DeckCard, DeckCard.deck_id == Deck.id) - .filter( + .where( Deck.is_snapshot.is_(True), Deck.is_public.is_(True), Deck.is_preconstructed.is_(True), Deck.is_legacy.is_(show_legacy), DeckCard.card_id.in_(root_card_ids), ) - .first() ) + preconstructed = session.execute(stmt).first() # Grab the last seen entity ID, if the user is logged in and has a subscription last_seen_entity_id = None if not current_user.is_anonymous(): - last_seen_entity_id = ( - session.query(Subscription.last_seen_entity_id) - .filter( - Subscription.user_id == current_user.id, - Subscription.source_entity_id == card.entity_id, - ) - .scalar() + stmt = select(Subscription.last_seen_entity_id).where( + Subscription.user_id == current_user.id, + Subscription.source_entity_id == card.entity_id, ) + last_seen_entity_id = session.execute(stmt).scalar() return { "card": card.json, @@ -739,13 +733,11 @@ def create_card( """ # Implicitly create the release, if necessary release_stub = stubify(data.release) - if not ( - release := ( - session.query(Release) - .filter(Release.stub == release_stub, Release.is_legacy.is_(False)) - .one_or_none() - ) - ): + stmt = select(Release).where( + Release.stub == release_stub, Release.is_legacy.is_(False) + ) + release = 
session.execute(stmt).scalar_one_or_none() + if not release: release = Release(name=data.release, stub=release_stub) session.add(release) session.commit() diff --git a/api/views/comments.py b/api/views/comments.py index 179a704..ff200c8 100644 --- a/api/views/comments.py +++ b/api/views/comments.py @@ -1,4 +1,5 @@ from fastapi import APIRouter, Depends, Request, Response, status +from sqlalchemy import select from api import db from api.depends import ( @@ -59,14 +60,15 @@ def get_comments( By default, comments are ordered from oldest to newest by created date. You can pass the `order` query parameter to use a different sorting order. """ - query = ( - session.query(Comment) + stmt = ( + select(Comment) .options(db.joinedload(Comment.user)) - .filter(Comment.source_entity_id == entity_id) + .where(Comment.source_entity_id == entity_id) .order_by(getattr(Comment.created, order)()) ) page_results = paginated_results_for_query( - query=query, + session=session, + stmt=stmt, paging=paging, url=str(request.url), ) @@ -102,10 +104,12 @@ def create_comment( ): """Post a comment to a resource on the site.""" # First, figure out what our entity ID is pointing to - source = session.query(Card).filter(Card.entity_id == entity_id).first() + stmt = select(Card).where(Card.entity_id == entity_id) + source = session.execute(stmt).scalar_one_or_none() source_type = "card" if not source: - source = session.query(Deck).filter(Deck.entity_id == entity_id).first() + stmt = select(Deck).where(Deck.entity_id == entity_id) + source = session.execute(stmt).scalar_one_or_none() source_type = "deck" if source is None: raise NotFoundException(detail="No valid resource found to comment on.") @@ -127,13 +131,13 @@ def create_comment( except AttributeError: # Decks don't have this attribute, so we can ignore it source_version = None - previous_ordering_increment = ( - session.query(Comment.ordering_increment) - .filter(Comment.source_entity_id == entity_id) + stmt = ( + 
select(Comment.ordering_increment) + .where(Comment.source_entity_id == entity_id) .order_by(Comment.created.desc()) .limit(1) - .scalar() ) + previous_ordering_increment = session.execute(stmt).scalar() if not previous_ordering_increment: previous_ordering_increment = 0 # Create our comment and update the stream and the existing user subscription @@ -190,9 +194,8 @@ def edit_comment( **Admin-only:** the `moderation_notes` field is required when modifying another user's comment; should contain a short description of the reason the comment is being moderated. """ - comment = ( - session.query(Comment).filter(Comment.entity_id == comment_entity_id).first() - ) + stmt = select(Comment).where(Comment.entity_id == comment_entity_id) + comment = session.execute(stmt).scalar_one_or_none() if not comment: raise NotFoundException(detail="No such comment found.") if comment.is_deleted: @@ -246,9 +249,8 @@ def delete_comment( **Admin-only:** the `moderation_notes` field is required when modifying another user's comment; should contain a short description of the reason the comment is being moderated. 
""" - comment = ( - session.query(Comment).filter(Comment.entity_id == comment_entity_id).first() - ) + stmt = select(Comment).where(Comment.entity_id == comment_entity_id) + comment = session.execute(stmt).scalar_one_or_none() if not comment: raise NotFoundException(detail="No such comment found.") success_response = Response(status_code=status.HTTP_204_NO_CONTENT) diff --git a/api/views/decks.py b/api/views/decks.py index 0fae085..09832fc 100644 --- a/api/views/decks.py +++ b/api/views/decks.py @@ -5,7 +5,7 @@ import httpx from fastapi import APIRouter, Depends, Query, Request, Response, status from pydantic import UUID4 -from sqlalchemy import and_, or_, select +from sqlalchemy import and_, delete, or_, select, update from api import db from api.depends import ( @@ -59,7 +59,7 @@ deck_to_dict, generate_deck_dict, get_conjuration_mapping, - get_decks_query, + get_decks_stmt, paginate_deck_listing, ) from api.services.stream import create_entity @@ -93,8 +93,9 @@ def list_published_decks( * `show_legacy` (default: false): if true, legacy 1.0 decks will be returned * `show_red_rains` (default: false): if true, only Red Rains decks will be returned """ - query = get_decks_query( - session, + # For now, keep using get_decks_query but need to handle the session parameter + # This will be addressed when updating views/decks.py fully + stmt = get_decks_stmt( show_legacy=filters.show_legacy, show_red_rains=filters.show_red_rains, is_public=True, @@ -105,7 +106,7 @@ def list_published_decks( players=filters.player, show_preconstructed=filters.show_preconstructed, ) - return paginate_deck_listing(query, session, request, paging) + return paginate_deck_listing(stmt, session, request, paging) @router.get( @@ -135,8 +136,7 @@ def list_my_decks( * `show_legacy` (default: false): if true, legacy 1.0 decks will be returned * `show_red_rains` (default: false): if true, only Red Rains decks will be returned """ - query = get_decks_query( - session, + stmt = get_decks_stmt( 
show_legacy=filters.show_legacy, show_red_rains=filters.show_red_rains, is_public=False, @@ -146,7 +146,7 @@ def list_my_decks( cards=filters.card, players=[current_user.badge], ) - return paginate_deck_listing(query, session, request, paging) + return paginate_deck_listing(stmt, session, request, paging) @router.get( @@ -169,11 +169,10 @@ def get_private_deck( primarily intended for loading a deck into an external application such as TableTop Simulator or Ashteki, but can also be used to privately share access to a deck with another user. """ - deck = ( - session.query(Deck) - .filter(Deck.direct_share_uuid == direct_share_uuid, Deck.is_deleted.is_(False)) - .first() + stmt = select(Deck).where( + Deck.direct_share_uuid == direct_share_uuid, Deck.is_deleted.is_(False) ) + deck = session.execute(stmt).scalar_one_or_none() if not deck: raise NotFoundException( detail="No such deck; it might have been deleted, or your share ID might be wrong." @@ -246,17 +245,17 @@ def get_deck( deck = source_deck # By default, re-route to the latest public snapshot else: - deck: Deck = ( - session.query(Deck) - .filter( + stmt = ( + select(Deck) + .where( Deck.source_id == source_deck.id, Deck.is_snapshot.is_(True), Deck.is_public.is_(True), Deck.is_deleted.is_(False), ) .order_by(Deck.created.desc()) - .first() ) + deck: Deck = session.execute(stmt).scalar_one_or_none() if not deck: raise NotFoundException(detail="Deck not found.") @@ -291,11 +290,11 @@ def get_deck( } for die in deck_dict["dice"]: release_stubs.add(dice_to_release[die["name"]]) - release_results = ( - session.query(Release, Deck) + stmt = ( + select(Release, Deck) .outerjoin(Card, Card.release_id == Release.id) .outerjoin(Deck, Deck.preconstructed_release == Release.id) - .filter( + .where( db.or_( Release.stub.in_(release_stubs), Card.stub.in_(card_stubs), @@ -304,8 +303,8 @@ def get_deck( ) .order_by(Release.id.asc()) .distinct(Release.id) - .all() ) + release_results = session.execute(stmt).all() release_data = 
[] for result in release_results: release_data.append( @@ -330,34 +329,29 @@ def get_deck( source_id = ( source_deck.id if not source_deck.is_snapshot else source_deck.source_id ) - deck_details["has_published_snapshot"] = bool( - session.query(Deck.id) - .filter( - Deck.source_id == source_id, - Deck.is_snapshot.is_(True), - Deck.is_public.is_(True), - Deck.is_deleted.is_(False), - ) - .count() + stmt = select(Deck.id).where( + Deck.source_id == source_id, + Deck.is_snapshot.is_(True), + Deck.is_public.is_(True), + Deck.is_deleted.is_(False), ) + count = session.execute( + select(db.func.count()).select_from(stmt.subquery()) + ).scalar() + deck_details["has_published_snapshot"] = bool(count) # If the user is subscribed to this deck, note their last seen entity ID for this deck if not current_user.is_anonymous(): - deck_source_entity_id = ( - deck.entity_id - if not deck.is_snapshot - else session.query(Deck.entity_id) - .filter(Deck.id == deck.source_id) - .scalar() - ) - subscription = ( - session.query(Subscription) - .filter( - Subscription.user_id == current_user.id, - Subscription.source_entity_id == deck_source_entity_id, - ) - .first() + if not deck.is_snapshot: + deck_source_entity_id = deck.entity_id + else: + stmt = select(Deck.entity_id).where(Deck.id == deck.source_id) + deck_source_entity_id = session.execute(stmt).scalar() + stmt = select(Subscription).where( + Subscription.user_id == current_user.id, + Subscription.source_entity_id == deck_source_entity_id, ) + subscription = session.execute(stmt).scalar_one_or_none() if subscription: deck_details["last_seen_entity_id"] = subscription.last_seen_entity_id @@ -387,13 +381,10 @@ def save_deck( """ # Verify that the user has access to this deck, if we're saving over an existing deck if data.id: - deck_check: Deck = ( - session.query( - Deck.user_id, Deck.is_legacy, Deck.is_snapshot, Deck.is_deleted - ) - .filter(Deck.id == data.id) - .first() - ) + stmt = select( + Deck.user_id, Deck.is_legacy, 
Deck.is_snapshot, Deck.is_deleted + ).where(Deck.id == data.id) + deck_check = session.execute(stmt).first() if not deck_check or deck_check.user_id != current_user.id: raise NoUserAccessException(detail="You cannot save a deck you do not own.") if deck_check.is_legacy: @@ -408,11 +399,10 @@ def save_deck( if isinstance(data.phoenixborn, str) else data.phoenixborn.get("stub") ) - phoenixborn = ( - session.query(Card.id, Card.name) - .filter(Card.stub == phoenixborn_stub, Card.is_legacy.is_(False)) - .first() + stmt = select(Card.id, Card.name).where( + Card.stub == phoenixborn_stub, Card.is_legacy.is_(False) ) + phoenixborn = session.execute(stmt).first() if not phoenixborn: raise APIException(detail="Valid Phoenixborn is required.") try: @@ -519,17 +509,17 @@ def create_snapshot( raise APIException( detail="Only public decks may be associated with a preconstructed deck." ) - preconstructed_release_id = ( - session.query(Release.id) + stmt = ( + select(Release.id) .outerjoin(Deck, Deck.preconstructed_release == Release.id) - .filter( + .where( Release.stub == data.preconstructed_release, Release.is_legacy.is_(False), Release.is_public.is_(True), Deck.id.is_(None), ) - .scalar() ) + preconstructed_release_id = session.execute(stmt).scalar() if not preconstructed_release_id: raise APIException( detail="No such release, or release already has a preconstructed deck." 
@@ -587,7 +577,7 @@ def list_snapshots( source_deck: Deck = session.get(Deck, deck_id) if not source_deck or source_deck.is_deleted or source_deck.is_snapshot: raise NotFoundException(detail="Deck not found.") - query = session.query(Deck).filter( + stmt = select(Deck).where( Deck.is_deleted.is_(False), Deck.is_snapshot.is_(True), Deck.source_id == source_deck.id, @@ -597,12 +587,12 @@ def list_snapshots( or current_user.id != source_deck.user_id or show_public_only is True ): - query = query.filter(Deck.is_public.is_(True)) - query = query.options(db.joinedload(Deck.user)).order_by( + stmt = stmt.where(Deck.is_public.is_(True)) + stmt = stmt.options(db.joinedload(Deck.user)).order_by( getattr(Deck.created, order)() ) return paginate_deck_listing( - query, + stmt, session, request, paging, @@ -645,18 +635,23 @@ def delete_deck( # Check if we have any snapshots for source decks, and just delete that sucker for real if not if ( not deck.is_snapshot - and session.query(Deck).filter(Deck.source_id == deck.id).count() == 0 + and session.execute( + select(db.func.count()).select_from( + select(Deck).where(Deck.source_id == deck.id).subquery() + ) + ).scalar() + == 0 ): - session.query(DeckCard).filter(DeckCard.deck_id == deck.id).delete( - synchronize_session=False - ) - session.query(DeckDie).filter(DeckDie.deck_id == deck_id).delete( - synchronize_session=False - ) - session.query(DeckSelectedCard).filter( + delete_stmt = delete(DeckCard).where(DeckCard.deck_id == deck.id) + session.execute(delete_stmt) + delete_stmt = delete(DeckDie).where(DeckDie.deck_id == deck_id) + session.execute(delete_stmt) + delete_stmt = delete(DeckSelectedCard).where( DeckSelectedCard.deck_id == deck_id - ).delete(synchronize_session=False) - session.query(Deck).filter(Deck.id == deck_id).delete(synchronize_session=False) + ) + session.execute(delete_stmt) + delete_stmt = delete(Deck).where(Deck.id == deck_id) + session.execute(delete_stmt) session.commit() return success_response @@ -666,28 
+661,25 @@ def delete_deck( # of the source deck's snapshots alone). if deck.is_snapshot and deck.is_public: # Check to see if we have a Stream entry that needs updating - stream_entry: Stream = ( - session.query(Stream) - .filter( - Stream.source_entity_id == deck.source.entity_id, - Stream.entity_type == "deck", - Stream.entity_id == deck.entity_id, - ) - .first() + stmt = select(Stream).where( + Stream.source_entity_id == deck.source.entity_id, + Stream.entity_type == "deck", + Stream.entity_id == deck.entity_id, ) + stream_entry: Stream = session.execute(stmt).scalar_one_or_none() if stream_entry: # We have a stream entry pointed to this snapshot, so check if we have an older snapshot # that we can swap in - previous_snapshot: Deck = ( - session.query(Deck) - .filter( + stmt = ( + select(Deck) + .where( Deck.source_id == deck.source_id, Deck.created < deck.created, Deck.is_deleted.is_(False), ) .order_by(Deck.created.desc()) - .first() ) + previous_snapshot: Deck = session.execute(stmt).scalar_one_or_none() if previous_snapshot: stream_entry.entity_id = previous_snapshot.entity_id stream_entry.posted = previous_snapshot.created @@ -697,15 +689,21 @@ def delete_deck( session.delete(stream_entry) elif not deck.is_snapshot: # If we're not deleting a snapshot, then we need to completely clear out the Stream entry - session.query(Stream).filter( + delete_stmt = delete(Stream).where( Stream.source_entity_id == deck.entity_id, Stream.entity_type == "deck" - ).delete(synchronize_session=False) + ) + session.execute(delete_stmt) # And mark all snapshots as deleted - session.query(Deck).filter( - Deck.source_id == deck.id, - Deck.is_snapshot.is_(True), - Deck.is_deleted.is_(False), - ).update({"is_deleted": True}, synchronize_session=False) + update_stmt = ( + update(Deck) + .where( + Deck.source_id == deck.id, + Deck.is_snapshot.is_(True), + Deck.is_deleted.is_(False), + ) + .values(is_deleted=True) + ) + session.execute(update_stmt) # Commit any pending changes, and 
return success session.commit() return success_response @@ -763,22 +761,23 @@ def clone_deck( Deck.is_legacy.is_(False), Deck.is_deleted.is_(False), ) - deck = session.query(Deck.id).filter(*valid_deck_filters).first() + stmt = select(Deck.id).where(*valid_deck_filters) + deck = session.execute(stmt).first() if not deck: raise NotFoundException(detail="Invalid ID for cloning.") # Then we grab a new entity_id first because it causes a commit and kills the process otherwise entity_id = create_entity(session) # Then we can finally grab our full deck and copy it - deck = ( - session.query(Deck) + stmt = ( + select(Deck) .options( db.joinedload(Deck.cards), db.joinedload(Deck.dice), db.joinedload(Deck.selected_cards), ) - .filter(*valid_deck_filters) - .first() + .where(*valid_deck_filters) ) + deck = session.execute(stmt).unique().scalar_one_or_none() # Create a clone of our deck object (transient cloning was too error-prone, so we're doing everything by hand) cloned_deck = Deck( entity_id=entity_id, @@ -960,27 +959,27 @@ def import_decks( card_stubs.add(rendered.phoenixborn.stub) created_dates.add(rendered.created) rendered_decks.append(rendered) - card_stub_to_id: dict[str, int] = { - x[0]: x[1] - for x in session.query(Card.stub, Card.id) + stmt = ( + select(Card.stub, Card.id) .join(Card.release) - .filter( + .where( Card.stub.in_(card_stubs), Card.is_legacy == False, Release.is_public == True, ) - .all() - } - created_to_deck: dict[datetime, Deck] = { - x.created: x - for x in session.query(Deck) + ) + card_stub_to_id: dict[str, int] = {x[0]: x[1] for x in session.execute(stmt).all()} + stmt = ( + select(Deck) .options( db.joinedload(Deck.cards), db.joinedload(Deck.dice), db.joinedload(Deck.selected_cards), ) - .filter(Deck.created.in_(created_dates), Deck.user_id == current_user.id) - .all() + .where(Deck.created.in_(created_dates), Deck.user_id == current_user.id) + ) + created_to_deck: dict[datetime, Deck] = { + x.created: x for x in 
session.execute(stmt).scalars().unique().all() } successfully_imported_created_dates = set() errors = [] @@ -1109,14 +1108,11 @@ def import_decks( errors.append(str(e)) # Now that we have imported everything, it's time to see if we can map source IDs - for row in ( - session.query(Deck.created, Deck.id) - .filter( - Deck.created.in_(source_created_to_deck.keys()), - Deck.user_id == current_user.id, - ) - .all() - ): + stmt = select(Deck.created, Deck.id).where( + Deck.created.in_(source_created_to_deck.keys()), + Deck.user_id == current_user.id, + ) + for row in session.execute(stmt).all(): source_created = row[0] source_id = row[1] snapshot_decks = source_created_to_deck[source_created] @@ -1183,33 +1179,28 @@ def export_decks( """ if not settings.allow_exports: raise APIException(detail="Deck exports are not allowed from this site.") - deck_user = ( - session.query(User).filter(User.deck_export_uuid == export_token).first() - ) + stmt = select(User).where(User.deck_export_uuid == export_token) + deck_user = session.execute(stmt).scalar_one_or_none() if not deck_user: raise NotFoundException(detail="No user matching export token.") # If we are exporting a "single" deck, then gather the source deck and all of its snapshots initial_deck = None if deck_share_uuid: - initial_deck = ( - session.query(Deck) - .filter( - Deck.direct_share_uuid == deck_share_uuid, - Deck.user_id == deck_user.id, - Deck.is_deleted == False, - Deck.is_legacy == False, - ) - .first() + stmt = select(Deck).where( + Deck.direct_share_uuid == deck_share_uuid, + Deck.user_id == deck_user.id, + Deck.is_deleted == False, + Deck.is_legacy == False, ) + initial_deck = session.execute(stmt).scalar_one_or_none() if not initial_deck: raise NotFoundException( detail="Current user does not have a deck with this share UUID." 
) if initial_deck.is_snapshot: - initial_deck = ( - session.query(Deck).filter(Deck.id == initial_deck.source_id).first() - ) + stmt = select(Deck).where(Deck.id == initial_deck.source_id) + initial_deck = session.execute(stmt).scalar_one_or_none() deck_filters = [ Deck.user_id == deck_user.id, @@ -1226,10 +1217,14 @@ def export_decks( ) if from_date: deck_filters.append(Deck.created > from_date) - query = session.query(Deck).filter(*deck_filters).order_by(Deck.created.asc()) - total_to_export = query.count() + stmt = select(Deck).where(*deck_filters).order_by(Deck.created.asc()) + total_to_export = session.execute( + select(db.func.count()).select_from(stmt.subquery()) + ).scalar() # Find our next set of decks to export. We limit by 1 more than our max so we can determine if there is a next page - decks_to_export = query.limit(settings.exports_per_request + 1).all() + decks_to_export = ( + session.execute(stmt.limit(settings.exports_per_request + 1)).scalars().all() + ) # Check if we have a next page, and discard the extra, if so have_next_page = len(decks_to_export) == settings.exports_per_request + 1 if have_next_page: @@ -1247,12 +1242,14 @@ def export_decks( # Ensure we lookup our Phoenixborn cards needed_cards.add(deck_row.phoenixborn_id) # Fetch and collate our dice information for all decks - deck_dice = session.query(DeckDie).filter(DeckDie.deck_id.in_(deck_ids)).all() + deckdie_stmt = select(DeckDie).where(DeckDie.deck_id.in_(deck_ids)) + deck_dice = session.execute(deckdie_stmt).scalars().all() deck_id_to_dice = defaultdict(list) for deck_die in deck_dice: deck_id_to_dice[deck_die.deck_id].append(deck_die) # Now that we have all our basic deck information, look up the cards and quantities they include - deck_cards = session.query(DeckCard).filter(DeckCard.deck_id.in_(deck_ids)).all() + deckcard_stmt = select(DeckCard).where(DeckCard.deck_id.in_(deck_ids)) + deck_cards = session.execute(deckcard_stmt).scalars().all() deck_id_to_deck_cards = 
defaultdict(list) for deck_card in deck_cards: needed_cards.add(deck_card.card_id) @@ -1262,26 +1259,23 @@ def export_decks( session=session, card_ids=needed_cards ) # Now that we have root-level conjurations, we can gather all our cards and setup our decks - cards = session.query(Card).filter(Card.id.in_(needed_cards)).all() + card_stmt = select(Card).where(Card.id.in_(needed_cards)) + cards = session.execute(card_stmt).scalars().all() card_id_to_card = {x.id: x for x in cards} # Gather our selected cards for these decks - deck_selected_cards = ( - session.query(DeckSelectedCard) - .filter(DeckSelectedCard.deck_id.in_(deck_ids)) - .all() + deckselected_stmt = select(DeckSelectedCard).where( + DeckSelectedCard.deck_id.in_(deck_ids) ) + deck_selected_cards = session.execute(deckselected_stmt).scalars().all() deck_id_to_selected_cards = defaultdict(list) for deck_selected_card in deck_selected_cards: deck_id_to_selected_cards[deck_selected_card.deck_id].append(deck_selected_card) # Gather all source IDs *that belong to this user* and stick them in a mapping - source_decks = ( - session.query(Deck.id, Deck.created) - .filter( - Deck.id.in_(source_ids), - Deck.is_deleted == False, - ) - .all() + source_stmt = select(Deck.id, Deck.created).where( + Deck.id.in_(source_ids), + Deck.is_deleted == False, ) + source_decks = session.execute(source_stmt).all() source_id_to_created = {x[0]: x[1] for x in source_decks} # And finally generate a dict for our deck export deck_output = [] @@ -1353,9 +1347,8 @@ def finalize_exported_decks( """ if not settings.allow_exports: raise APIException(detail="Deck exports are not allowed from this site.") - deck_user = ( - session.query(User).filter(User.deck_export_uuid == export_token).first() - ) + stmt = select(User).where(User.deck_export_uuid == export_token) + deck_user = session.execute(stmt).scalar_one_or_none() if not deck_user: raise NotFoundException(detail="No user matching export token.") if len(deck_create_dates) == 0: @@ 
-1367,6 +1360,9 @@ def finalize_exported_decks( detail=f"You cannot mark more than {settings.exports_per_request} decks as successfully imported at once." ) - session.query(Deck).filter( - Deck.user_id == deck_user.id, Deck.created.in_(deck_create_dates) - ).update({Deck.is_exported: True}, synchronize_session=False) + update_stmt = ( + update(Deck) + .where(Deck.user_id == deck_user.id, Deck.created.in_(deck_create_dates)) + .values(is_exported=True) + ) + session.execute(update_stmt) diff --git a/api/views/health_check.py b/api/views/health_check.py index b196995..8a02ae6 100644 --- a/api/views/health_check.py +++ b/api/views/health_check.py @@ -1,6 +1,7 @@ import logging from fastapi import APIRouter, Depends, Response, status +from sqlalchemy import select from sqlalchemy.sql.expression import literal_column from api import db @@ -36,9 +37,8 @@ def health_check(response: Response, session: db.Session = Depends(get_session)) # Check for PostgreSQL database health try: - meaning_of_life_the_universe_and_everything = session.query( - literal_column("42") - ).scalar() + stmt = select(literal_column("42")) + meaning_of_life_the_universe_and_everything = session.execute(stmt).scalar() assert meaning_of_life_the_universe_and_everything == 42 except: output.services.database = HealthCheckStatusResponses.error diff --git a/api/views/players.py b/api/views/players.py index 307b27f..4afbc81 100644 --- a/api/views/players.py +++ b/api/views/players.py @@ -4,6 +4,7 @@ from fastapi import APIRouter, Depends, HTTPException, status from fastapi.exceptions import RequestValidationError from pydantic import UUID4 +from sqlalchemy import select from api import db from api.depends import ( @@ -49,7 +50,8 @@ def request_invite( Will fail if requested by an authenticated user. 
""" email = data.email.lower() - user = session.query(User).filter(User.email == email).first() + stmt = select(User).where(User.email == email) + user = session.execute(stmt).scalar_one_or_none() if user: raise APIException( detail="This email is already in use.", @@ -91,7 +93,8 @@ def create_player( Will fail if requested by an authenticated user. """ - invite = session.query(Invite).filter(Invite.uuid == token).first() + stmt = select(Invite).where(Invite.uuid == token) + invite = session.execute(stmt).scalar_one_or_none() if invite is None: raise NotFoundException(detail="Token not found. Please request a new invite.") user = create_user( @@ -187,11 +190,8 @@ def get_deck_export_token( ) def get_user_data(badge: str, session: db.Session = Depends(get_session)): """Return public user information for any user.""" - user = ( - session.query(User) - .filter(User.badge == badge, User.is_banned.is_(False)) - .first() - ) + stmt = select(User).where(User.badge == badge, User.is_banned.is_(False)) + user = session.execute(stmt).scalar_one_or_none() if not user: raise NotFoundException(detail="User not found.") return user @@ -213,7 +213,8 @@ def moderate_user( current_user: "User" = Depends(admin_required), ): """**Admin only.** Ban a user; or moderate their username or description.""" - user: User = session.query(User).filter(User.badge == badge).first() + stmt = select(User).where(User.badge == badge) + user: User = session.execute(stmt).scalar_one_or_none() if not user: raise NotFoundException(detail="User not found.") if user.id == current_user.id: diff --git a/api/views/releases.py b/api/views/releases.py index e1dc26b..e463c2e 100644 --- a/api/views/releases.py +++ b/api/views/releases.py @@ -1,4 +1,5 @@ from fastapi import APIRouter, Depends +from sqlalchemy import delete, select from api import db from api.depends import ( @@ -34,10 +35,8 @@ def list_releases( * `show_legacy` (default: false): if true, legacy 1.0 card data will be returned """ - query = 
get_releases_query( - session=session, current_user=current_user, show_legacy=show_legacy - ) - return query.all() + stmt = get_releases_query(current_user=current_user, show_legacy=show_legacy) + return session.execute(stmt).all() @router.put( @@ -59,27 +58,23 @@ def save_collection( **This is not a patch!** You must pass the entire list of the user's collections every time. """ # Clear out our existing releases - session.query(UserRelease).filter(UserRelease.user_id == current_user.id).delete() + delete_stmt = delete(UserRelease).where(UserRelease.user_id == current_user.id) + session.execute(delete_stmt) session.commit() - release_ids = ( - ( - session.query(Release.id) - .filter( - Release.is_legacy.is_(False), - Release.is_public.is_(True), - Release.stub.in_(collection), - ) - .all() + release_ids = None + if collection: + stmt = select(Release.id).where( + Release.is_legacy.is_(False), + Release.is_public.is_(True), + Release.stub.in_(collection), ) - if collection - else None - ) + release_ids = session.execute(stmt).all() if release_ids: for row in release_ids: session.add(UserRelease(user_id=current_user.id, release_id=row.id)) session.commit() - query = get_releases_query(session=session, current_user=current_user) - return query.all() + stmt = get_releases_query(current_user=current_user) + return session.execute(stmt).all() @router.patch( @@ -103,11 +98,10 @@ def update_release( **Admin only.** """ - release = ( - session.query(Release) - .filter(Release.stub == release_stub, Release.is_legacy.is_(False)) - .first() + stmt = select(Release).where( + Release.stub == release_stub, Release.is_legacy.is_(False) ) + release = session.execute(stmt).scalar_one_or_none() if not release: raise NotFoundException(detail="Release not found.") release.is_public = data.is_public diff --git a/api/views/subscriptions.py b/api/views/subscriptions.py index 91fe6d4..0898006 100644 --- a/api/views/subscriptions.py +++ b/api/views/subscriptions.py @@ -1,4 +1,5 @@ from 
fastapi import APIRouter, Depends, Response, status +from sqlalchemy import delete, select from api import db from api.depends import AUTH_RESPONSES, get_session, login_required @@ -34,10 +35,12 @@ def create_subscription( ): """Subscribe to comments and updates for a deck or card.""" # Make sure the entity ID can be subscribed to - source = session.query(Card).filter(Card.entity_id == entity_id).first() + stmt = select(Card).where(Card.entity_id == entity_id) + source = session.execute(stmt).scalar_one_or_none() is_deck = False if not source: - source = session.query(Deck).filter(Deck.entity_id == entity_id).first() + stmt = select(Deck).where(Deck.entity_id == entity_id) + source = session.execute(stmt).scalar_one_or_none() is_deck = True if source is None: raise NotFoundException(detail="No valid resource found to subscribe to.") @@ -51,14 +54,11 @@ def create_subscription( pass # Check if they already have a subscription - subscription = ( - session.query(Subscription) - .filter( - Subscription.source_entity_id == entity_id, - Subscription.user_id == current_user.id, - ) - .first() + stmt = select(Subscription).where( + Subscription.source_entity_id == entity_id, + Subscription.user_id == current_user.id, ) + subscription = session.execute(stmt).scalar_one_or_none() if subscription: # The front-end expects that if last_seen_entity_id is None it means we are not subscribed, # so this is a bit of a hack to ensure that it always has some sort of value for comparison @@ -69,25 +69,25 @@ def create_subscription( # Look up the most recently seen entity ID (assumes that they subscribed from the detail page, since it's silly to # force them to immediately update the last seen ID after subscribing). 
- last_seen = ( - session.query(Comment.entity_id) - .filter(Comment.source_entity_id == entity_id) + stmt = ( + select(Comment.entity_id) + .where(Comment.source_entity_id == entity_id) .order_by(Comment.entity_id.desc()) - .first() ) + last_seen = session.execute(stmt).first() if not last_seen and is_deck: # If we don't have any comments on this deck, grab the latest entity ID for the most recent published snapshot - last_seen = ( - session.query(Deck.entity_id) - .filter( + stmt = ( + select(Deck.entity_id) + .where( Deck.source_id == source.id, Deck.is_deleted == False, Deck.is_snapshot == True, Deck.is_public == True, ) .order_by(Deck.entity_id.desc()) - .first() ) + last_seen = session.execute(stmt).first() last_seen_entity_id = last_seen.entity_id if last_seen else None @@ -128,10 +128,11 @@ def delete_subscription( session: db.Session = Depends(get_session), ): """Delete a subscription to comments and updates for a deck or card.""" - session.query(Subscription).filter( + delete_stmt = delete(Subscription).where( Subscription.user_id == current_user.id, Subscription.source_entity_id == entity_id, - ).delete() + ) + session.execute(delete_stmt) session.commit() return Response(status_code=status.HTTP_204_NO_CONTENT) @@ -164,37 +165,28 @@ def update_subscription( the latest viewed comment or the latest published deck snapshot. 
""" # Grab the relevant subscription - subscription = ( - session.query(Subscription) - .filter( - Subscription.user_id == current_user.id, - Subscription.source_entity_id == entity_id, - ) - .first() + stmt = select(Subscription).where( + Subscription.user_id == current_user.id, + Subscription.source_entity_id == entity_id, ) + subscription = session.execute(stmt).scalar_one_or_none() if not subscription: raise NotFoundException(detail="You are not subscribed to this content.") # Validate the entity ID that was passed in - last_seen = ( - session.query(Comment) - .filter( - Comment.source_entity_id == entity_id, - Comment.entity_id == data.last_seen_entity_id, - ) - .first() + stmt = select(Comment).where( + Comment.source_entity_id == entity_id, + Comment.entity_id == data.last_seen_entity_id, ) + last_seen = session.execute(stmt).scalar_one_or_none() if not last_seen: # This might be a deck snapshot, so check for that - last_seen = ( - session.query(Deck) - .filter( - Deck.entity_id == data.last_seen_entity_id, - Deck.is_snapshot == True, - Deck.is_public == True, - Deck.is_deleted == False, - ) - .first() + stmt = select(Deck).where( + Deck.entity_id == data.last_seen_entity_id, + Deck.is_snapshot == True, + Deck.is_public == True, + Deck.is_deleted == False, ) + last_seen = session.execute(stmt).scalar_one_or_none() if not last_seen: raise APIException(detail="Invalid entity ID passed for this subscription.") subscription.last_seen_entity_id = data.last_seen_entity_id From 1f89f86b7eb9ebc1c119d6686264e2fbc570386b Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Tue, 26 Aug 2025 23:27:57 -0700 Subject: [PATCH 07/15] Execute tests with `future=True` for engine to ensure full compatibility --- api/tests/conftest.py | 1 + 1 file changed, 1 insertion(+) diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 8e328de..1d5b362 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -44,6 +44,7 @@ def test_engine(): 
f"@{settings.postgres_host}:{settings.postgres_port}/test" ), echo=False, + future=True, ) # Drop database and recreate to ensure tests are always run against a clean slate if database_exists(test_engine.url): From d058cae3ba274a40877a21ffd61dc23f1bbd8b73 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Tue, 26 Aug 2025 23:32:56 -0700 Subject: [PATCH 08/15] WIP: Updated to SQLAlchemy 2.0 Test suite is borked again; presumably the savepoint logic that I so painstakingly worked out for 1.4 just doesn't work for 2.0. --- poetry.lock | 368 ++++++++++++++++++++++++++----------------------- pyproject.toml | 2 +- 2 files changed, 194 insertions(+), 176 deletions(-) diff --git a/poetry.lock b/poetry.lock index 0877a81..9bb410e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,23 +2,23 @@ [[package]] name = "alembic" -version = "1.14.1" +version = "1.16.4" description = "A database migration tool for SQLAlchemy." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main"] files = [ - {file = "alembic-1.14.1-py3-none-any.whl", hash = "sha256:1acdd7a3a478e208b0503cd73614d5e4c6efafa4e73518bb60e4f2846a37b1c5"}, - {file = "alembic-1.14.1.tar.gz", hash = "sha256:496e888245a53adf1498fcab31713a469c65836f8de76e01399aa1c3e90dd213"}, + {file = "alembic-1.16.4-py3-none-any.whl", hash = "sha256:b05e51e8e82efc1abd14ba2af6392897e145930c3e0a2faf2b0da2f7f7fd660d"}, + {file = "alembic-1.16.4.tar.gz", hash = "sha256:efab6ada0dd0fae2c92060800e0bf5c1dc26af15a10e02fb4babff164b4725e2"}, ] [package.dependencies] Mako = "*" -SQLAlchemy = ">=1.3.0" -typing-extensions = ">=4" +SQLAlchemy = ">=1.4.0" +typing-extensions = ">=4.12" [package.extras] -tz = ["backports.zoneinfo ; python_version < \"3.9\"", "tzdata"] +tz = ["tzdata"] [[package]] name = "annotated-types" @@ -297,100 +297,100 @@ markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \" [[package]] name = "coverage" -version = "7.10.4" +version = "7.10.5" description = "Code coverage measurement 
for Python" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "coverage-7.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d92d6edb0ccafd20c6fbf9891ca720b39c2a6a4b4a6f9cf323ca2c986f33e475"}, - {file = "coverage-7.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7202da14dc0236884fcc45665ffb2d79d4991a53fbdf152ab22f69f70923cc22"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ada418633ae24ec8d0fcad5efe6fc7aa3c62497c6ed86589e57844ad04365674"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b828e33eca6c3322adda3b5884456f98c435182a44917ded05005adfa1415500"}, - {file = "coverage-7.10.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:802793ba397afcfdbe9f91f89d65ae88b958d95edc8caf948e1f47d8b6b2b606"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d0b23512338c54101d3bf7a1ab107d9d75abda1d5f69bc0887fd079253e4c27e"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f36b7dcf72d06a8c5e2dd3aca02be2b1b5db5f86404627dff834396efce958f2"}, - {file = "coverage-7.10.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fce316c367a1dc2c411821365592eeb335ff1781956d87a0410eae248188ba51"}, - {file = "coverage-7.10.4-cp310-cp310-win32.whl", hash = "sha256:8c5dab29fc8070b3766b5fc85f8d89b19634584429a2da6d42da5edfadaf32ae"}, - {file = "coverage-7.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:4b0d114616f0fccb529a1817457d5fb52a10e106f86c5fb3b0bd0d45d0d69b93"}, - {file = "coverage-7.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:05d5f98ec893d4a2abc8bc5f046f2f4367404e7e5d5d18b83de8fde1093ebc4f"}, - {file = "coverage-7.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9267efd28f8994b750d171e58e481e3bbd69e44baed540e4c789f8e368b24b88"}, - {file = 
"coverage-7.10.4-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4456a039fdc1a89ea60823d0330f1ac6f97b0dbe9e2b6fb4873e889584b085fb"}, - {file = "coverage-7.10.4-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c2bfbd2a9f7e68a21c5bd191be94bfdb2691ac40d325bac9ef3ae45ff5c753d9"}, - {file = "coverage-7.10.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ab7765f10ae1df7e7fe37de9e64b5a269b812ee22e2da3f84f97b1c7732a0d8"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0a09b13695166236e171ec1627ff8434b9a9bae47528d0ba9d944c912d33b3d2"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5c9e75dfdc0167d5675e9804f04a56b2cf47fb83a524654297000b578b8adcb7"}, - {file = "coverage-7.10.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c751261bfe6481caba15ec005a194cb60aad06f29235a74c24f18546d8377df0"}, - {file = "coverage-7.10.4-cp311-cp311-win32.whl", hash = "sha256:051c7c9e765f003c2ff6e8c81ccea28a70fb5b0142671e4e3ede7cebd45c80af"}, - {file = "coverage-7.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:1a647b152f10be08fb771ae4a1421dbff66141e3d8ab27d543b5eb9ea5af8e52"}, - {file = "coverage-7.10.4-cp311-cp311-win_arm64.whl", hash = "sha256:b09b9e4e1de0d406ca9f19a371c2beefe3193b542f64a6dd40cfcf435b7d6aa0"}, - {file = "coverage-7.10.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a1f0264abcabd4853d4cb9b3d164adbf1565da7dab1da1669e93f3ea60162d79"}, - {file = "coverage-7.10.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:536cbe6b118a4df231b11af3e0f974a72a095182ff8ec5f4868c931e8043ef3e"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9a4c0d84134797b7bf3f080599d0cd501471f6c98b715405166860d79cfaa97e"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", 
hash = "sha256:7c155fc0f9cee8c9803ea0ad153ab6a3b956baa5d4cd993405dc0b45b2a0b9e0"}, - {file = "coverage-7.10.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a5f2ab6e451d4b07855d8bcf063adf11e199bff421a4ba57f5bb95b7444ca62"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:685b67d99b945b0c221be0780c336b303a7753b3e0ec0d618c795aada25d5e7a"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0c079027e50c2ae44da51c2e294596cbc9dbb58f7ca45b30651c7e411060fc23"}, - {file = "coverage-7.10.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3749aa72b93ce516f77cf5034d8e3c0dfd45c6e8a163a602ede2dc5f9a0bb927"}, - {file = "coverage-7.10.4-cp312-cp312-win32.whl", hash = "sha256:fecb97b3a52fa9bcd5a7375e72fae209088faf671d39fae67261f37772d5559a"}, - {file = "coverage-7.10.4-cp312-cp312-win_amd64.whl", hash = "sha256:26de58f355626628a21fe6a70e1e1fad95702dafebfb0685280962ae1449f17b"}, - {file = "coverage-7.10.4-cp312-cp312-win_arm64.whl", hash = "sha256:67e8885408f8325198862bc487038a4980c9277d753cb8812510927f2176437a"}, - {file = "coverage-7.10.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:2b8e1d2015d5dfdbf964ecef12944c0c8c55b885bb5c0467ae8ef55e0e151233"}, - {file = "coverage-7.10.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:25735c299439018d66eb2dccf54f625aceb78645687a05f9f848f6e6c751e169"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:715c06cb5eceac4d9b7cdf783ce04aa495f6aff657543fea75c30215b28ddb74"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:e017ac69fac9aacd7df6dc464c05833e834dc5b00c914d7af9a5249fcccf07ef"}, - {file = "coverage-7.10.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bad180cc40b3fccb0f0e8c702d781492654ac2580d468e3ffc8065e38c6c2408"}, - 
{file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:becbdcd14f685fada010a5f792bf0895675ecf7481304fe159f0cd3f289550bd"}, - {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0b485ca21e16a76f68060911f97ebbe3e0d891da1dbbce6af7ca1ab3f98b9097"}, - {file = "coverage-7.10.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6c1d098ccfe8e1e0a1ed9a0249138899948afd2978cbf48eb1cc3fcd38469690"}, - {file = "coverage-7.10.4-cp313-cp313-win32.whl", hash = "sha256:8630f8af2ca84b5c367c3df907b1706621abe06d6929f5045fd628968d421e6e"}, - {file = "coverage-7.10.4-cp313-cp313-win_amd64.whl", hash = "sha256:f68835d31c421736be367d32f179e14ca932978293fe1b4c7a6a49b555dff5b2"}, - {file = "coverage-7.10.4-cp313-cp313-win_arm64.whl", hash = "sha256:6eaa61ff6724ca7ebc5326d1fae062d85e19b38dd922d50903702e6078370ae7"}, - {file = "coverage-7.10.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:702978108876bfb3d997604930b05fe769462cc3000150b0e607b7b444f2fd84"}, - {file = "coverage-7.10.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e8f978e8c5521d9c8f2086ac60d931d583fab0a16f382f6eb89453fe998e2484"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:df0ac2ccfd19351411c45e43ab60932b74472e4648b0a9edf6a3b58846e246a9"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73a0d1aaaa3796179f336448e1576a3de6fc95ff4f07c2d7251d4caf5d18cf8d"}, - {file = "coverage-7.10.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:873da6d0ed6b3ffc0bc01f2c7e3ad7e2023751c0d8d86c26fe7322c314b031dc"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c6446c75b0e7dda5daa876a1c87b480b2b52affb972fedd6c22edf1aaf2e00ec"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = 
"sha256:6e73933e296634e520390c44758d553d3b573b321608118363e52113790633b9"}, - {file = "coverage-7.10.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:52073d4b08d2cb571234c8a71eb32af3c6923149cf644a51d5957ac128cf6aa4"}, - {file = "coverage-7.10.4-cp313-cp313t-win32.whl", hash = "sha256:e24afb178f21f9ceb1aefbc73eb524769aa9b504a42b26857243f881af56880c"}, - {file = "coverage-7.10.4-cp313-cp313t-win_amd64.whl", hash = "sha256:be04507ff1ad206f4be3d156a674e3fb84bbb751ea1b23b142979ac9eebaa15f"}, - {file = "coverage-7.10.4-cp313-cp313t-win_arm64.whl", hash = "sha256:f3e3ff3f69d02b5dad67a6eac68cc9c71ae343b6328aae96e914f9f2f23a22e2"}, - {file = "coverage-7.10.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:a59fe0af7dd7211ba595cf7e2867458381f7e5d7b4cffe46274e0b2f5b9f4eb4"}, - {file = "coverage-7.10.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:3a6c35c5b70f569ee38dc3350cd14fdd0347a8b389a18bb37538cc43e6f730e6"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:acb7baf49f513554c4af6ef8e2bd6e8ac74e6ea0c7386df8b3eb586d82ccccc4"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:a89afecec1ed12ac13ed203238b560cbfad3522bae37d91c102e690b8b1dc46c"}, - {file = "coverage-7.10.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:480442727f464407d8ade6e677b7f21f3b96a9838ab541b9a28ce9e44123c14e"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:a89bf193707f4a17f1ed461504031074d87f035153239f16ce86dfb8f8c7ac76"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:3ddd912c2fc440f0fb3229e764feec85669d5d80a988ff1b336a27d73f63c818"}, - {file = "coverage-7.10.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:8a538944ee3a42265e61c7298aeba9ea43f31c01271cf028f437a7b4075592cf"}, - {file = "coverage-7.10.4-cp314-cp314-win32.whl", 
hash = "sha256:fd2e6002be1c62476eb862b8514b1ba7e7684c50165f2a8d389e77da6c9a2ebd"}, - {file = "coverage-7.10.4-cp314-cp314-win_amd64.whl", hash = "sha256:ec113277f2b5cf188d95fb66a65c7431f2b9192ee7e6ec9b72b30bbfb53c244a"}, - {file = "coverage-7.10.4-cp314-cp314-win_arm64.whl", hash = "sha256:9744954bfd387796c6a091b50d55ca7cac3d08767795b5eec69ad0f7dbf12d38"}, - {file = "coverage-7.10.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5af4829904dda6aabb54a23879f0f4412094ba9ef153aaa464e3c1b1c9bc98e6"}, - {file = "coverage-7.10.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7bba5ed85e034831fac761ae506c0644d24fd5594727e174b5a73aff343a7508"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:d57d555b0719834b55ad35045de6cc80fc2b28e05adb6b03c98479f9553b387f"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ba62c51a72048bb1ea72db265e6bd8beaabf9809cd2125bbb5306c6ce105f214"}, - {file = "coverage-7.10.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0acf0c62a6095f07e9db4ec365cc58c0ef5babb757e54745a1aa2ea2a2564af1"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e1033bf0f763f5cf49ffe6594314b11027dcc1073ac590b415ea93463466deec"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:92c29eff894832b6a40da1789b1f252305af921750b03ee4535919db9179453d"}, - {file = "coverage-7.10.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:822c4c830989c2093527e92acd97be4638a44eb042b1bdc0e7a278d84a070bd3"}, - {file = "coverage-7.10.4-cp314-cp314t-win32.whl", hash = "sha256:e694d855dac2e7cf194ba33653e4ba7aad7267a802a7b3fc4347d0517d5d65cd"}, - {file = "coverage-7.10.4-cp314-cp314t-win_amd64.whl", hash = "sha256:efcc54b38ef7d5bfa98050f220b415bc5bb3d432bd6350a861cf6da0ede2cdcd"}, - {file = 
"coverage-7.10.4-cp314-cp314t-win_arm64.whl", hash = "sha256:6f3a3496c0fa26bfac4ebc458747b778cff201c8ae94fa05e1391bab0dbc473c"}, - {file = "coverage-7.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:48fd4d52600c2a9d5622e52dfae674a7845c5e1dceaf68b88c99feb511fbcfd6"}, - {file = "coverage-7.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:56217b470d09d69e6b7dcae38200f95e389a77db801cb129101697a4553b18b6"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:44ac3f21a6e28c5ff7f7a47bca5f87885f6a1e623e637899125ba47acd87334d"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3387739d72c84d17b4d2f7348749cac2e6700e7152026912b60998ee9a40066b"}, - {file = "coverage-7.10.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3f111ff20d9a6348e0125be892608e33408dd268f73b020940dfa8511ad05503"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:01a852f0a9859734b018a3f483cc962d0b381d48d350b1a0c47d618c73a0c398"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:225111dd06759ba4e37cee4c0b4f3df2b15c879e9e3c37bf986389300b9917c3"}, - {file = "coverage-7.10.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2178d4183bd1ba608f0bb12e71e55838ba1b7dbb730264f8b08de9f8ef0c27d0"}, - {file = "coverage-7.10.4-cp39-cp39-win32.whl", hash = "sha256:93d175fe81913aee7a6ea430abbdf2a79f1d9fd451610e12e334e4fe3264f563"}, - {file = "coverage-7.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:2221a823404bb941c7721cf0ef55ac6ee5c25d905beb60c0bba5e5e85415d353"}, - {file = "coverage-7.10.4-py3-none-any.whl", hash = "sha256:065d75447228d05121e5c938ca8f0e91eed60a1eb2d1258d42d5084fecfc3302"}, - {file = "coverage-7.10.4.tar.gz", hash = "sha256:25f5130af6c8e7297fd14634955ba9e1697f47143f289e2a23284177c0061d27"}, + {file = "coverage-7.10.5-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = "sha256:c6a5c3414bfc7451b879141ce772c546985163cf553f08e0f135f0699a911801"}, + {file = "coverage-7.10.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bc8e4d99ce82f1710cc3c125adc30fd1487d3cf6c2cd4994d78d68a47b16989a"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:02252dc1216e512a9311f596b3169fad54abcb13827a8d76d5630c798a50a754"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:73269df37883e02d460bee0cc16be90509faea1e3bd105d77360b512d5bb9c33"}, + {file = "coverage-7.10.5-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f8a81b0614642f91c9effd53eec284f965577591f51f547a1cbeb32035b4c2f"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6a29f8e0adb7f8c2b95fa2d4566a1d6e6722e0a637634c6563cb1ab844427dd9"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fcf6ab569436b4a647d4e91accba12509ad9f2554bc93d3aee23cc596e7f99c3"}, + {file = "coverage-7.10.5-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:90dc3d6fb222b194a5de60af8d190bedeeddcbc7add317e4a3cd333ee6b7c879"}, + {file = "coverage-7.10.5-cp310-cp310-win32.whl", hash = "sha256:414a568cd545f9dc75f0686a0049393de8098414b58ea071e03395505b73d7a8"}, + {file = "coverage-7.10.5-cp310-cp310-win_amd64.whl", hash = "sha256:e551f9d03347196271935fd3c0c165f0e8c049220280c1120de0084d65e9c7ff"}, + {file = "coverage-7.10.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c177e6ffe2ebc7c410785307758ee21258aa8e8092b44d09a2da767834f075f2"}, + {file = "coverage-7.10.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:14d6071c51ad0f703d6440827eaa46386169b5fdced42631d5a5ac419616046f"}, + {file = "coverage-7.10.5-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:61f78c7c3bc272a410c5ae3fde7792b4ffb4acc03d35a7df73ca8978826bb7ab"}, + {file 
= "coverage-7.10.5-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f39071caa126f69d63f99b324fb08c7b1da2ec28cbb1fe7b5b1799926492f65c"}, + {file = "coverage-7.10.5-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343a023193f04d46edc46b2616cdbee68c94dd10208ecd3adc56fcc54ef2baa1"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:585ffe93ae5894d1ebdee69fc0b0d4b7c75d8007983692fb300ac98eed146f78"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:b0ef4e66f006ed181df29b59921bd8fc7ed7cd6a9289295cd8b2824b49b570df"}, + {file = "coverage-7.10.5-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:eb7b0bbf7cc1d0453b843eca7b5fa017874735bef9bfdfa4121373d2cc885ed6"}, + {file = "coverage-7.10.5-cp311-cp311-win32.whl", hash = "sha256:1d043a8a06987cc0c98516e57c4d3fc2c1591364831e9deb59c9e1b4937e8caf"}, + {file = "coverage-7.10.5-cp311-cp311-win_amd64.whl", hash = "sha256:fefafcca09c3ac56372ef64a40f5fe17c5592fab906e0fdffd09543f3012ba50"}, + {file = "coverage-7.10.5-cp311-cp311-win_arm64.whl", hash = "sha256:7e78b767da8b5fc5b2faa69bb001edafcd6f3995b42a331c53ef9572c55ceb82"}, + {file = "coverage-7.10.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c2d05c7e73c60a4cecc7d9b60dbfd603b4ebc0adafaef371445b47d0f805c8a9"}, + {file = "coverage-7.10.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:32ddaa3b2c509778ed5373b177eb2bf5662405493baeff52278a0b4f9415188b"}, + {file = "coverage-7.10.5-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:dd382410039fe062097aa0292ab6335a3f1e7af7bba2ef8d27dcda484918f20c"}, + {file = "coverage-7.10.5-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7fa22800f3908df31cea6fb230f20ac49e343515d968cc3a42b30d5c3ebf9b5a"}, + {file = 
"coverage-7.10.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f366a57ac81f5e12797136552f5b7502fa053c861a009b91b80ed51f2ce651c6"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5f1dc8f1980a272ad4a6c84cba7981792344dad33bf5869361576b7aef42733a"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:2285c04ee8676f7938b02b4936d9b9b672064daab3187c20f73a55f3d70e6b4a"}, + {file = "coverage-7.10.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c2492e4dd9daab63f5f56286f8a04c51323d237631eb98505d87e4c4ff19ec34"}, + {file = "coverage-7.10.5-cp312-cp312-win32.whl", hash = "sha256:38a9109c4ee8135d5df5505384fc2f20287a47ccbe0b3f04c53c9a1989c2bbaf"}, + {file = "coverage-7.10.5-cp312-cp312-win_amd64.whl", hash = "sha256:6b87f1ad60b30bc3c43c66afa7db6b22a3109902e28c5094957626a0143a001f"}, + {file = "coverage-7.10.5-cp312-cp312-win_arm64.whl", hash = "sha256:672a6c1da5aea6c629819a0e1461e89d244f78d7b60c424ecf4f1f2556c041d8"}, + {file = "coverage-7.10.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ef3b83594d933020f54cf65ea1f4405d1f4e41a009c46df629dd964fcb6e907c"}, + {file = "coverage-7.10.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2b96bfdf7c0ea9faebce088a3ecb2382819da4fbc05c7b80040dbc428df6af44"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:63df1fdaffa42d914d5c4d293e838937638bf75c794cf20bee12978fc8c4e3bc"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8002dc6a049aac0e81ecec97abfb08c01ef0c1fbf962d0c98da3950ace89b869"}, + {file = "coverage-7.10.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:63d4bb2966d6f5f705a6b0c6784c8969c468dbc4bcf9d9ded8bff1c7e092451f"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:1f672efc0731a6846b157389b6e6d5d5e9e59d1d1a23a5c66a99fd58339914d5"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:3f39cef43d08049e8afc1fde4a5da8510fc6be843f8dea350ee46e2a26b2f54c"}, + {file = "coverage-7.10.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2968647e3ed5a6c019a419264386b013979ff1fb67dd11f5c9886c43d6a31fc2"}, + {file = "coverage-7.10.5-cp313-cp313-win32.whl", hash = "sha256:0d511dda38595b2b6934c2b730a1fd57a3635c6aa2a04cb74714cdfdd53846f4"}, + {file = "coverage-7.10.5-cp313-cp313-win_amd64.whl", hash = "sha256:9a86281794a393513cf117177fd39c796b3f8e3759bb2764259a2abba5cce54b"}, + {file = "coverage-7.10.5-cp313-cp313-win_arm64.whl", hash = "sha256:cebd8e906eb98bb09c10d1feed16096700b1198d482267f8bf0474e63a7b8d84"}, + {file = "coverage-7.10.5-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0520dff502da5e09d0d20781df74d8189ab334a1e40d5bafe2efaa4158e2d9e7"}, + {file = "coverage-7.10.5-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d9cd64aca68f503ed3f1f18c7c9174cbb797baba02ca8ab5112f9d1c0328cd4b"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0913dd1613a33b13c4f84aa6e3f4198c1a21ee28ccb4f674985c1f22109f0aae"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1b7181c0feeb06ed8a02da02792f42f829a7b29990fef52eff257fef0885d760"}, + {file = "coverage-7.10.5-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36d42b7396b605f774d4372dd9c49bed71cbabce4ae1ccd074d155709dd8f235"}, + {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:b4fdc777e05c4940b297bf47bf7eedd56a39a61dc23ba798e4b830d585486ca5"}, + {file = "coverage-7.10.5-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:42144e8e346de44a6f1dbd0a56575dd8ab8dfa7e9007da02ea5b1c30ab33a7db"}, + {file = 
"coverage-7.10.5-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:66c644cbd7aed8fe266d5917e2c9f65458a51cfe5eeff9c05f15b335f697066e"}, + {file = "coverage-7.10.5-cp313-cp313t-win32.whl", hash = "sha256:2d1b73023854068c44b0c554578a4e1ef1b050ed07cf8b431549e624a29a66ee"}, + {file = "coverage-7.10.5-cp313-cp313t-win_amd64.whl", hash = "sha256:54a1532c8a642d8cc0bd5a9a51f5a9dcc440294fd06e9dda55e743c5ec1a8f14"}, + {file = "coverage-7.10.5-cp313-cp313t-win_arm64.whl", hash = "sha256:74d5b63fe3f5f5d372253a4ef92492c11a4305f3550631beaa432fc9df16fcff"}, + {file = "coverage-7.10.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:68c5e0bc5f44f68053369fa0d94459c84548a77660a5f2561c5e5f1e3bed7031"}, + {file = "coverage-7.10.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:cf33134ffae93865e32e1e37df043bef15a5e857d8caebc0099d225c579b0fa3"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ad8fa9d5193bafcf668231294241302b5e683a0518bf1e33a9a0dfb142ec3031"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:146fa1531973d38ab4b689bc764592fe6c2f913e7e80a39e7eeafd11f0ef6db2"}, + {file = "coverage-7.10.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6013a37b8a4854c478d3219ee8bc2392dea51602dd0803a12d6f6182a0061762"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:eb90fe20db9c3d930fa2ad7a308207ab5b86bf6a76f54ab6a40be4012d88fcae"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:384b34482272e960c438703cafe63316dfbea124ac62006a455c8410bf2a2262"}, + {file = "coverage-7.10.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:467dc74bd0a1a7de2bedf8deaf6811f43602cb532bd34d81ffd6038d6d8abe99"}, + {file = "coverage-7.10.5-cp314-cp314-win32.whl", hash = "sha256:556d23d4e6393ca898b2e63a5bca91e9ac2d5fb13299ec286cd69a09a7187fde"}, + 
{file = "coverage-7.10.5-cp314-cp314-win_amd64.whl", hash = "sha256:f4446a9547681533c8fa3e3c6cf62121eeee616e6a92bd9201c6edd91beffe13"}, + {file = "coverage-7.10.5-cp314-cp314-win_arm64.whl", hash = "sha256:5e78bd9cf65da4c303bf663de0d73bf69f81e878bf72a94e9af67137c69b9fe9"}, + {file = "coverage-7.10.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:5661bf987d91ec756a47c7e5df4fbcb949f39e32f9334ccd3f43233bbb65e508"}, + {file = "coverage-7.10.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a46473129244db42a720439a26984f8c6f834762fc4573616c1f37f13994b357"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1f64b8d3415d60f24b058b58d859e9512624bdfa57a2d1f8aff93c1ec45c429b"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:44d43de99a9d90b20e0163f9770542357f58860a26e24dc1d924643bd6aa7cb4"}, + {file = "coverage-7.10.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a931a87e5ddb6b6404e65443b742cb1c14959622777f2a4efd81fba84f5d91ba"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9559b906a100029274448f4c8b8b0a127daa4dade5661dfd821b8c188058842"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b08801e25e3b4526ef9ced1aa29344131a8f5213c60c03c18fe4c6170ffa2874"}, + {file = "coverage-7.10.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ed9749bb8eda35f8b636fb7632f1c62f735a236a5d4edadd8bbcc5ea0542e732"}, + {file = "coverage-7.10.5-cp314-cp314t-win32.whl", hash = "sha256:609b60d123fc2cc63ccee6d17e4676699075db72d14ac3c107cc4976d516f2df"}, + {file = "coverage-7.10.5-cp314-cp314t-win_amd64.whl", hash = "sha256:0666cf3d2c1626b5a3463fd5b05f5e21f99e6aec40a3192eee4d07a15970b07f"}, + {file = "coverage-7.10.5-cp314-cp314t-win_arm64.whl", hash = 
"sha256:bc85eb2d35e760120540afddd3044a5bf69118a91a296a8b3940dfc4fdcfe1e2"}, + {file = "coverage-7.10.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:62835c1b00c4a4ace24c1a88561a5a59b612fbb83a525d1c70ff5720c97c0610"}, + {file = "coverage-7.10.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5255b3bbcc1d32a4069d6403820ac8e6dbcc1d68cb28a60a1ebf17e47028e898"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3876385722e335d6e991c430302c24251ef9c2a9701b2b390f5473199b1b8ebf"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8048ce4b149c93447a55d279078c8ae98b08a6951a3c4d2d7e87f4efc7bfe100"}, + {file = "coverage-7.10.5-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4028e7558e268dd8bcf4d9484aad393cafa654c24b4885f6f9474bf53183a82a"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03f47dc870eec0367fcdd603ca6a01517d2504e83dc18dbfafae37faec66129a"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:2d488d7d42b6ded7ea0704884f89dcabd2619505457de8fc9a6011c62106f6e5"}, + {file = "coverage-7.10.5-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b3dcf2ead47fa8be14224ee817dfc1df98043af568fe120a22f81c0eb3c34ad2"}, + {file = "coverage-7.10.5-cp39-cp39-win32.whl", hash = "sha256:02650a11324b80057b8c9c29487020073d5e98a498f1857f37e3f9b6ea1b2426"}, + {file = "coverage-7.10.5-cp39-cp39-win_amd64.whl", hash = "sha256:b45264dd450a10f9e03237b41a9a24e85cbb1e278e5a32adb1a303f58f0017f3"}, + {file = "coverage-7.10.5-py3-none-any.whl", hash = "sha256:0be24d35e4db1d23d0db5c0f6a74a962e2ec83c426b5cac09f4234aadef38e4a"}, + {file = "coverage-7.10.5.tar.gz", hash = "sha256:f2e57716a78bc3ae80b2207be0709a3b2b63b9f2dcf9740ee6ac03588a2015b6"}, ] [package.extras] @@ -514,14 +514,14 @@ gmpy2 = ["gmpy2"] [[package]] name = "email-validator" -version = "2.2.0" 
+version = "2.3.0" description = "A robust email address syntax and deliverability validation library." optional = false python-versions = ">=3.8" groups = ["main"] files = [ - {file = "email_validator-2.2.0-py3-none-any.whl", hash = "sha256:561977c2d73ce3611850a06fa56b414621e0c8faa9d66f2611407d87465da631"}, - {file = "email_validator-2.2.0.tar.gz", hash = "sha256:cb690f344c617a714f22e66ae771445a1ceb46821152df8e165c5f9a364582b7"}, + {file = "email_validator-2.3.0-py3-none-any.whl", hash = "sha256:80f13f623413e6b197ae73bb10bf4eb0908faf509ad8362c5edeb0be7fd450b4"}, + {file = "email_validator-2.3.0.tar.gz", hash = "sha256:9fc05c37f2f6cf439ff414f8fc46d917929974a82244c20eb10231ba60c54426"}, ] [package.dependencies] @@ -572,7 +572,7 @@ description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.9" groups = ["main", "dev"] -markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")" files = [ {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, @@ -955,14 +955,14 @@ files = [ [[package]] name = "platformdirs" -version = "4.3.8" +version = "4.4.0" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." 
optional = false python-versions = ">=3.9" groups = ["dev", "vscode"] files = [ - {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, - {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, + {file = "platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85"}, + {file = "platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf"}, ] [package.extras] @@ -1221,7 +1221,7 @@ astroid = ">=3.3.8,<=3.4.0.dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} dill = [ {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, - {version = ">=0.3.6", markers = "python_version == \"3.11\""}, + {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, ] isort = ">=4.2.5,<5.13 || >5.13,<7" mccabe = ">=0.6,<0.8" @@ -1426,7 +1426,7 @@ ecdsa = ">=0.19.1,<1" python-http-client = ">=3.2.1" werkzeug = [ {version = ">=2.3.5", markers = "python_version >= \"3.12\""}, - {version = ">=2.2.0", markers = "python_version >= \"3.11\""}, + {version = ">=2.2.0", markers = "python_version == \"3.11\""}, ] [[package]] @@ -1455,81 +1455,99 @@ files = [ [[package]] name = "sqlalchemy" -version = "1.4.54" +version = "2.0.43" description = "Database Abstraction Library" optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" +python-versions = ">=3.7" groups = ["main", "dev"] files = [ - {file = "SQLAlchemy-1.4.54-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:af00236fe21c4d4f4c227b6ccc19b44c594160cc3ff28d104cdce85855369277"}, - {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1183599e25fa38a1a322294b949da02b4f0da13dbc2688ef9dbe746df573f8a6"}, 
- {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1990d5a6a5dc358a0894c8ca02043fb9a5ad9538422001fb2826e91c50f1d539"}, - {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:14b3f4783275339170984cadda66e3ec011cce87b405968dc8d51cf0f9997b0d"}, - {file = "SQLAlchemy-1.4.54-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b24364150738ce488333b3fb48bfa14c189a66de41cd632796fbcacb26b4585"}, - {file = "SQLAlchemy-1.4.54-cp310-cp310-win32.whl", hash = "sha256:a8a72259a1652f192c68377be7011eac3c463e9892ef2948828c7d58e4829988"}, - {file = "SQLAlchemy-1.4.54-cp310-cp310-win_amd64.whl", hash = "sha256:b67589f7955924865344e6eacfdcf70675e64f36800a576aa5e961f0008cde2a"}, - {file = "SQLAlchemy-1.4.54-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b05e0626ec1c391432eabb47a8abd3bf199fb74bfde7cc44a26d2b1b352c2c6e"}, - {file = "SQLAlchemy-1.4.54-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13e91d6892b5fcb94a36ba061fb7a1f03d0185ed9d8a77c84ba389e5bb05e936"}, - {file = "SQLAlchemy-1.4.54-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb59a11689ff3c58e7652260127f9e34f7f45478a2f3ef831ab6db7bcd72108f"}, - {file = "SQLAlchemy-1.4.54-cp311-cp311-win32.whl", hash = "sha256:1390ca2d301a2708fd4425c6d75528d22f26b8f5cbc9faba1ddca136671432bc"}, - {file = "SQLAlchemy-1.4.54-cp311-cp311-win_amd64.whl", hash = "sha256:2b37931eac4b837c45e2522066bda221ac6d80e78922fb77c75eb12e4dbcdee5"}, - {file = "SQLAlchemy-1.4.54-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3f01c2629a7d6b30d8afe0326b8c649b74825a0e1ebdcb01e8ffd1c920deb07d"}, - {file = "SQLAlchemy-1.4.54-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:9c24dd161c06992ed16c5e528a75878edbaeced5660c3db88c820f1f0d3fe1f4"}, - {file = "SQLAlchemy-1.4.54-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b5e0d47d619c739bdc636bbe007da4519fc953393304a5943e0b5aec96c9877c"}, - {file = "SQLAlchemy-1.4.54-cp312-cp312-win32.whl", hash = "sha256:12bc0141b245918b80d9d17eca94663dbd3f5266ac77a0be60750f36102bbb0f"}, - {file = "SQLAlchemy-1.4.54-cp312-cp312-win_amd64.whl", hash = "sha256:f941aaf15f47f316123e1933f9ea91a6efda73a161a6ab6046d1cde37be62c88"}, - {file = "SQLAlchemy-1.4.54-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:a41611835010ed4ea4c7aed1da5b58aac78ee7e70932a91ed2705a7b38e40f52"}, - {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e8c1b9ecaf9f2590337d5622189aeb2f0dbc54ba0232fa0856cf390957584a9"}, - {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0de620f978ca273ce027769dc8db7e6ee72631796187adc8471b3c76091b809e"}, - {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c5a2530400a6e7e68fd1552a55515de6a4559122e495f73554a51cedafc11669"}, - {file = "SQLAlchemy-1.4.54-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0cf7076c8578b3de4e43a046cc7a1af8466e1c3f5e64167189fe8958a4f9c02"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:f1e1b92ee4ee9ffc68624ace218b89ca5ca667607ccee4541a90cc44999b9aea"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41cffc63c7c83dfc30c4cab5b4308ba74440a9633c4509c51a0c52431fb0f8ab"}, - {file = 
"SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5933c45d11cbd9694b1540aa9076816cc7406964c7b16a380fd84d3a5fe3241"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:cafe0ba3a96d0845121433cffa2b9232844a2609fce694fcc02f3f31214ece28"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a19f816f4702d7b1951d7576026c7124b9bfb64a9543e571774cf517b7a50b29"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-win32.whl", hash = "sha256:76c2ba7b5a09863d0a8166fbc753af96d561818c572dbaf697c52095938e7be4"}, - {file = "SQLAlchemy-1.4.54-cp37-cp37m-win_amd64.whl", hash = "sha256:a86b0e4be775902a5496af4fb1b60d8a2a457d78f531458d294360b8637bb014"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-macosx_12_0_x86_64.whl", hash = "sha256:a49730afb716f3f675755afec109895cab95bc9875db7ffe2e42c1b1c6279482"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26e78444bc77d089e62874dc74df05a5c71f01ac598010a327881a48408d0064"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02d2ecb9508f16ab9c5af466dfe5a88e26adf2e1a8d1c56eb616396ccae2c186"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:394b0135900b62dbf63e4809cdc8ac923182af2816d06ea61cd6763943c2cc05"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed3576675c187e3baa80b02c4c9d0edfab78eff4e89dd9da736b921333a2432"}, - {file = "SQLAlchemy-1.4.54-cp38-cp38-win32.whl", hash = "sha256:fc9ffd9a38e21fad3e8c5a88926d57f94a32546e937e0be46142b2702003eba7"}, - {file = 
"SQLAlchemy-1.4.54-cp38-cp38-win_amd64.whl", hash = "sha256:a01bc25eb7a5688656c8770f931d5cb4a44c7de1b3cec69b84cc9745d1e4cc10"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:0b76bbb1cbae618d10679be8966f6d66c94f301cfc15cb49e2f2382563fb6efb"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux1_x86_64.manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_5_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdb2886c0be2c6c54d0651d5a61c29ef347e8eec81fd83afebbf7b59b80b7393"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:954816850777ac234a4e32b8c88ac1f7847088a6e90cfb8f0e127a1bf3feddff"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1d83cd1cc03c22d922ec94d0d5f7b7c96b1332f5e122e81b1a61fb22da77879a"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1576fba3616f79496e2f067262200dbf4aab1bb727cd7e4e006076686413c80c"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-win32.whl", hash = "sha256:3112de9e11ff1957148c6de1df2bc5cc1440ee36783412e5eedc6f53638a577d"}, - {file = "SQLAlchemy-1.4.54-cp39-cp39-win_amd64.whl", hash = "sha256:6da60fb24577f989535b8fc8b2ddc4212204aaf02e53c4c7ac94ac364150ed08"}, - {file = "sqlalchemy-1.4.54.tar.gz", hash = "sha256:4470fbed088c35dc20b78a39aaf4ae54fe81790c783b3264872a0224f437c31a"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:21ba7a08a4253c5825d1db389d4299f64a100ef9800e4624c8bf70d8f136e6ed"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11b9503fa6f8721bef9b8567730f664c5a5153d25e247aadc69247c4bc605227"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:07097c0a1886c150ef2adba2ff7437e84d40c0f7dcb44a2c2b9c905ccfc6361c"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:cdeff998cb294896a34e5b2f00e383e7c5c4ef3b4bfa375d9104723f15186443"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:bcf0724a62a5670e5718957e05c56ec2d6850267ea859f8ad2481838f889b42c"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-win32.whl", hash = "sha256:c697575d0e2b0a5f0433f679bda22f63873821d991e95a90e9e52aae517b2e32"}, + {file = "SQLAlchemy-2.0.43-cp37-cp37m-win_amd64.whl", hash = "sha256:d34c0f6dbefd2e816e8f341d0df7d4763d382e3f452423e752ffd1e213da2512"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70322986c0c699dca241418fcf18e637a4369e0ec50540a2b907b184c8bca069"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:87accdbba88f33efa7b592dc2e8b2a9c2cdbca73db2f9d5c510790428c09c154"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c00e7845d2f692ebfc7d5e4ec1a3fd87698e4337d09e58d6749a16aedfdf8612"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:022e436a1cb39b13756cf93b48ecce7aa95382b9cfacceb80a7d263129dfd019"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c5e73ba0d76eefc82ec0219d2301cb33bfe5205ed7a2602523111e2e56ccbd20"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9c2e02f06c68092b875d5cbe4824238ab93a7fa35d9c38052c033f7ca45daa18"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-win32.whl", hash = "sha256:e7a903b5b45b0d9fa03ac6a331e1c1d6b7e0ab41c63b6217b3d10357b83c8b00"}, + {file = "sqlalchemy-2.0.43-cp310-cp310-win_amd64.whl", hash = "sha256:4bf0edb24c128b7be0c61cd17eef432e4bef507013292415f3fb7023f02b7d4b"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:52d9b73b8fb3e9da34c2b31e6d99d60f5f99fd8c1225c9dad24aeb74a91e1d29"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f42f23e152e4545157fa367b2435a1ace7571cab016ca26038867eb7df2c3631"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fb1a8c5438e0c5ea51afe9c6564f951525795cf432bed0c028c1cb081276685"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db691fa174e8f7036afefe3061bc40ac2b770718be2862bfb03aabae09051aca"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:fe2b3b4927d0bc03d02ad883f402d5de201dbc8894ac87d2e981e7d87430e60d"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4d3d9b904ad4a6b175a2de0738248822f5ac410f52c2fd389ada0b5262d6a1e3"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-win32.whl", hash = "sha256:5cda6b51faff2639296e276591808c1726c4a77929cfaa0f514f30a5f6156921"}, + {file = "sqlalchemy-2.0.43-cp311-cp311-win_amd64.whl", hash = "sha256:c5d1730b25d9a07727d20ad74bc1039bbbb0a6ca24e6769861c1aa5bf2c4c4a8"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687"}, + {file = 
"sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d"}, + {file = "sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e7c08f57f75a2bb62d7ee80a89686a5e5669f199235c6d1dac75cd59374091c3"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:14111d22c29efad445cd5021a70a8b42f7d9152d8ba7f73304c4d82460946aaa"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21b27b56eb2f82653168cefe6cb8e970cdaf4f3a6cb2c5e3c3c1cf3158968ff9"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c5a9da957c56e43d72126a3f5845603da00e0293720b03bde0aacffcf2dc04f"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d79f9fdc9584ec83d1b3c75e9f4595c49017f5594fee1a2217117647225d738"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9df7126fd9db49e3a5a3999442cc67e9ee8971f3cb9644250107d7296cb2a164"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-win32.whl", hash = "sha256:7f1ac7828857fcedb0361b48b9ac4821469f7694089d15550bbcf9ab22564a1d"}, + {file = "sqlalchemy-2.0.43-cp313-cp313-win_amd64.whl", hash = "sha256:971ba928fcde01869361f504fcff3b7143b47d30de188b11c6357c0505824197"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e6aeb2e0932f32950cf56a8b4813cb15ff792fc0c9b3752eaf067cfe298496a"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:61f964a05356f4bca4112e6334ed7c208174511bd56e6b8fc86dad4d024d4185"}, + {file = 
"sqlalchemy-2.0.43-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46293c39252f93ea0910aababa8752ad628bcce3a10d3f260648dd472256983f"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:136063a68644eca9339d02e6693932116f6a8591ac013b0014479a1de664e40a"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6e2bf13d9256398d037fef09fd8bf9b0bf77876e22647d10761d35593b9ac547"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:44337823462291f17f994d64282a71c51d738fc9ef561bf265f1d0fd9116a782"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-win32.whl", hash = "sha256:13194276e69bb2af56198fef7909d48fd34820de01d9c92711a5fa45497cc7ed"}, + {file = "sqlalchemy-2.0.43-cp38-cp38-win_amd64.whl", hash = "sha256:334f41fa28de9f9be4b78445e68530da3c5fa054c907176460c81494f4ae1f5e"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ceb5c832cc30663aeaf5e39657712f4c4241ad1f638d487ef7216258f6d41fe7"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:11f43c39b4b2ec755573952bbcc58d976779d482f6f832d7f33a8d869ae891bf"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:413391b2239db55be14fa4223034d7e13325a1812c8396ecd4f2c08696d5ccad"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c379e37b08c6c527181a397212346be39319fb64323741d23e46abd97a400d34"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03d73ab2a37d9e40dec4984d1813d7878e01dbdc742448d44a7341b7a9f408c7"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:8cee08f15d9e238ede42e9bbc1d6e7158d0ca4f176e4eab21f88ac819ae3bd7b"}, + {file = "sqlalchemy-2.0.43-cp39-cp39-win32.whl", hash = "sha256:b3edaec7e8b6dc5cd94523c6df4f294014df67097c8217a89929c99975811414"}, + {file = 
"sqlalchemy-2.0.43-cp39-cp39-win_amd64.whl", hash = "sha256:227119ce0a89e762ecd882dc661e0aa677a690c914e358f0dd8932a2e8b2765b"}, + {file = "sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc"}, + {file = "sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version >= \"3\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = ">=1", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +typing-extensions = ">=4.6.0" [package.extras] -aiomysql = ["aiomysql (>=0.2.0) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] -aiosqlite = ["aiosqlite ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "typing_extensions (!=3.10.0.1)"] -asyncio = ["greenlet (!=0.4.17) ; python_version >= \"3\""] -asyncmy = ["asyncmy (>=0.2.3,!=0.2.4) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] -mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\"", "mariadb (>=1.0.1,!=1.1.2) ; python_version >= \"3\""] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] mssql = ["pyodbc"] -mssql-pymssql 
= ["pymssql", "pymssql"] -mssql-pyodbc = ["pyodbc", "pyodbc"] -mypy = ["mypy (>=0.910) ; python_version >= \"3\"", "sqlalchemy2-stubs"] -mysql = ["mysqlclient (>=1.4.0) ; python_version >= \"3\"", "mysqlclient (>=1.4.0,<2) ; python_version < \"3\""] -mysql-connector = ["mysql-connector-python", "mysql-connector-python"] -oracle = ["cx_oracle (>=7) ; python_version >= \"3\"", "cx_oracle (>=7,<8) ; python_version < \"3\""] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] postgresql = ["psycopg2 (>=2.7)"] -postgresql-asyncpg = ["asyncpg ; python_version >= \"3\"", "asyncpg ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\"", "greenlet (!=0.4.17) ; python_version >= \"3\""] -postgresql-pg8000 = ["pg8000 (>=1.16.6,!=1.29.0) ; python_version >= \"3\"", "pg8000 (>=1.16.6,!=1.29.0) ; python_version >= \"3\""] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] postgresql-psycopg2binary = ["psycopg2-binary"] postgresql-psycopg2cffi = ["psycopg2cffi"] -pymysql = ["pymysql (<1) ; python_version < \"3\"", "pymysql ; python_version >= \"3\""] -sqlcipher = ["sqlcipher3_binary ; python_version >= \"3\""] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] [[package]] name = "sqlalchemy-utils" @@ -1562,14 +1580,14 @@ url = ["furl (>=0.4.1)"] [[package]] name = "starlette" -version = "0.47.2" +version = "0.47.3" description = "The little ASGI library that shines." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b"}, - {file = "starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8"}, + {file = "starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51"}, + {file = "starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9"}, ] [package.dependencies] @@ -1593,14 +1611,14 @@ files = [ [[package]] name = "typing-extensions" -version = "4.14.1" +version = "4.15.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["main"] +groups = ["main", "dev"] files = [ - {file = "typing_extensions-4.14.1-py3-none-any.whl", hash = "sha256:d1e1e3b58374dc93031d6eda2420a48ea44a36c2b4766a4fdeb3710755731d76"}, - {file = "typing_extensions-4.14.1.tar.gz", hash = "sha256:38b39f4aeeab64884ce9f74c94263ef78f3c22467c8724005483154c26648d36"}, + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, ] [[package]] @@ -1710,4 +1728,4 @@ watchdog = ["watchdog (>=2.3)"] [metadata] lock-version = "2.1" python-versions = "^3.11.9" -content-hash = "85bacd1430c1e8ee85e5cd96b72f7c87251dfb2e30c33811a3fc94577c75dcd7" +content-hash = "6e8d94dcfe8061c4b3bfe859d7d4be69cb9dd863ace2dd883835bb44407ba06d" diff --git a/pyproject.toml b/pyproject.toml index 4ada52e..61d16fb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,7 +17,7 @@ packages =[ python = "^3.11.9" fastapi = "^0.116.1" gunicorn = "^23.0.0" -sqlalchemy = "^1.4.0" +sqlalchemy = "^2.0.43" alembic = "^1.4.2" psycopg2 = "^2.8.5" 
python-jose = {extras = ["cryptography"], version = "^3.2.0"} From 889900a5abc83f68edeca599b7521d1970993a0d Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 09:07:41 -0700 Subject: [PATCH 09/15] WIP: Experiments to convert the test suite to 2.0-style nested savepoints --- api/tests/cards/conftest.py | 19 ++++++++++--------- api/tests/conftest.py | 2 +- api/tests/decks/conftest.py | 2 +- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/api/tests/cards/conftest.py b/api/tests/cards/conftest.py index 57fec1f..d9513ea 100644 --- a/api/tests/cards/conftest.py +++ b/api/tests/cards/conftest.py @@ -136,7 +136,7 @@ def _create_cards_for_filtration(session: db.Session, is_legacy=False): @pytest.fixture(scope="package") -def cards_session(test_engine: Engine, monkeypatch_package) -> Session: +def cards_connection(test_engine: Engine) -> Session: """Populate our database with the cards needed for listing tests. This causes our session to be reused between all tests in this package. 
@@ -144,25 +144,26 @@ def cards_session(test_engine: Engine, monkeypatch_package) -> Session: # Create a nested transaction that includes standard card data connection = test_engine.connect() cards_transaction = connection.begin() - session = Session(bind=connection) - # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction - monkeypatch_package.setattr(session, "commit", session.flush) + session = Session(bind=connection, join_transaction_mode="create_savepoint") # Create our fake cards that are relied on by the tests in this module _create_cards_for_filtration(session, is_legacy=True) _create_cards_for_filtration(session) try: - yield session + yield connection finally: cards_transaction.rollback() connection.close() @pytest.fixture(scope="function") -def session(cards_session): +def session(cards_connection): """Return a nested transaction on the outer session, to prevent rolling back card data""" - cards_session.begin_nested() + savepoint = cards_connection.begin_nested() try: - yield cards_session + with Session( + bind=cards_connection, join_transaction_mode="create_savepoint" + ) as session: + yield session finally: - cards_session.rollback() + savepoint.rollback() diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 1d5b362..96266fc 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -67,7 +67,7 @@ def session(test_engine: Engine, monkeypatch) -> Session: """Return an SQLAlchemy session for this test, complete with SAVEPOINT for internal rollbacks""" connection = test_engine.connect() transaction = connection.begin() - session = Session(bind=connection) + session = Session(bind=connection, join_transaction_mode="create_savepoint") try: yield session finally: diff --git a/api/tests/decks/conftest.py b/api/tests/decks/conftest.py index bbe78b0..e1a2b3d 100644 --- a/api/tests/decks/conftest.py +++ b/api/tests/decks/conftest.py @@ -15,7 +15,7 @@ def cards_session(test_engine: Engine, 
monkeypatch_package) -> Session: """ connection = test_engine.connect() cards_transaction = connection.begin() - session = Session(bind=connection) + session = Session(bind=connection, join_transaction_mode="create_savepoint") # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction monkeypatch_package.setattr(session, "commit", session.flush) # Create our fake cards that are relied on by the tests in this module From 08f69a6ccfdb8b7684091d76f24866c947db613b Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:19:03 -0700 Subject: [PATCH 10/15] Updated root and cards conftest logic to properly use 2.0 SAVEPOINTs Through trial and error, I discovered that: * Monkeypatching commit is no longer necessary (and in fact breaks things) * `begin_nested()` must be called on the connection * Multiple sessions appear to be safer than passing a single session around --- api/tests/cards/conftest.py | 6 +++--- api/tests/conftest.py | 11 ++++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/api/tests/cards/conftest.py b/api/tests/cards/conftest.py index d9513ea..5b0a85b 100644 --- a/api/tests/cards/conftest.py +++ b/api/tests/cards/conftest.py @@ -144,10 +144,10 @@ def cards_connection(test_engine: Engine) -> Session: # Create a nested transaction that includes standard card data connection = test_engine.connect() cards_transaction = connection.begin() - session = Session(bind=connection, join_transaction_mode="create_savepoint") + cards_session = Session(bind=connection, join_transaction_mode="create_savepoint") # Create our fake cards that are relied on by the tests in this module - _create_cards_for_filtration(session, is_legacy=True) - _create_cards_for_filtration(session) + _create_cards_for_filtration(cards_session, is_legacy=True) + _create_cards_for_filtration(cards_session) try: yield connection diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 96266fc..6a1c4b0 100--- 
a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -14,12 +14,10 @@ def test_endpoint(client: TestClient): from fastapi.testclient import TestClient from sqlalchemy import create_engine from sqlalchemy.engine import Engine -from sqlalchemy.orm import Session from sqlalchemy_utils import create_database, database_exists, drop_database import api.environment -# `models` is necessary to ensure that AlchemyBase is properly populated from api import app, db from api.depends import get_session @@ -63,21 +61,20 @@ def test_engine(): @pytest.fixture(scope="function") -def session(test_engine: Engine, monkeypatch) -> Session: +def session(test_engine: Engine) -> db.Session: """Return an SQLAlchemy session for this test, complete with SAVEPOINT for internal rollbacks""" connection = test_engine.connect() transaction = connection.begin() - session = Session(bind=connection, join_transaction_mode="create_savepoint") try: - yield session + with db.Session(bind=connection, join_transaction_mode="create_savepoint") as session: + yield session finally: - session.close() transaction.rollback() connection.close() @pytest.fixture(scope="function") -def client(session: Session) -> TestClient: +def client(session: db.Session) -> TestClient: """Return a FastAPI TestClient for issuing requests and rollback session transaction""" def override_get_session(): From 2770315c9202aa8d799dc9127907f10b9058df5b Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:20:27 -0700 Subject: [PATCH 11/15] Updated decks conftest to 2.0 SAVEPOINTs This module was a bit trickier; I was unfortunately unable to find a good way to nest SAVEPOINTs at three levels (e.g. could NOT get package -> module -> function nesting to properly incrementally roll back no matter what I tried), so now all deck tests share the same cards but rebuild their module-based stuff on a per-function basis. 
--- api/tests/conftest.py | 11 ----- api/tests/decks/conftest.py | 32 +++++-------- api/tests/decks/test_comments.py | 18 +++---- api/tests/decks/test_deck_clone.py | 30 ++++++------ api/tests/decks/test_deck_create.py | 12 ++--- api/tests/decks/test_deck_delete.py | 12 ++--- api/tests/decks/test_deck_export.py | 28 +++++------ api/tests/decks/test_deck_import.py | 6 +-- api/tests/decks/test_decks.py | 68 +++++++++++++-------------- api/tests/decks/test_subscriptions.py | 12 ++--- 10 files changed, 104 insertions(+), 125 deletions(-) diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 6a1c4b0..4dce34b 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -83,14 +83,3 @@ def override_get_session(): app.dependency_overrides[get_session] = override_get_session yield TestClient(app) - - -@pytest.fixture(scope="package") -def monkeypatch_package(): - """Monkeypatch must be re-implemented to be included in fixtures for non-function scopes - - See: https://github.com/pytest-dev/pytest/issues/363 - """ - monkeypatch = MonkeyPatch() - yield monkeypatch - monkeypatch.undo() diff --git a/api/tests/decks/conftest.py b/api/tests/decks/conftest.py index e1a2b3d..b0376e2 100644 --- a/api/tests/decks/conftest.py +++ b/api/tests/decks/conftest.py @@ -7,42 +7,32 @@ @pytest.fixture(scope="package") -def cards_session(test_engine: Engine, monkeypatch_package) -> Session: +def cards_connection(test_engine: Engine) -> Session: """Populate our database with the cards needed to create decks once for the package - This causes our session to be reused between all tests in this package, with specific classes + This causes our session to be reused between all tests in this package, with specific files handling deck/user data persistence using nested rollbacks. 
""" + # Create a nested transaction that includes standard card data connection = test_engine.connect() cards_transaction = connection.begin() - session = Session(bind=connection, join_transaction_mode="create_savepoint") - # Overwrite commits with flushes so that we can query stuff, but it's in the same transaction - monkeypatch_package.setattr(session, "commit", session.flush) + cards_session = Session(bind=connection, join_transaction_mode="create_savepoint") # Create our fake cards that are relied on by the tests in this module - create_cards_for_decks(session) + create_cards_for_decks(cards_session) try: - yield session + yield connection finally: cards_transaction.rollback() connection.close() -@pytest.fixture(scope="module") -def decks_session(cards_session): - """Adds a module-level layer of nesting to keep the decks and such created in modules isolated""" - cards_session.begin_nested() - try: - yield cards_session - finally: - cards_session.rollback() - - @pytest.fixture(scope="function") -def session(decks_session): +def session(cards_connection): """Return a nested transaction on the outer session, to prevent rolling back card data""" - decks_session.begin_nested() + savepoint = cards_connection.begin_nested() try: - yield decks_session + with Session(bind=cards_connection, join_transaction_mode="create_savepoint") as session: + yield session finally: - decks_session.rollback() + savepoint.rollback() diff --git a/api/tests/decks/test_comments.py b/api/tests/decks/test_comments.py index 5adeb74..7ee107d 100644 --- a/api/tests/decks/test_comments.py +++ b/api/tests/decks/test_comments.py @@ -12,21 +12,21 @@ from .deck_utils import create_deck_for_user -@pytest.fixture(scope="module", autouse=True) -def user1(decks_session): - user1, _ = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user1(session): + user1, _ = create_user_token(session) return user1 -@pytest.fixture(scope="module", autouse=True) -def 
deck1(decks_session, user1): - return create_deck_for_user(decks_session, user1, release_stub="master-set") +@pytest.fixture(scope="function", autouse=True) +def deck1(session, user1): + return create_deck_for_user(session, user1, release_stub="master-set") -@pytest.fixture(scope="module", autouse=True) -def snapshot1(decks_session, user1, deck1): +@pytest.fixture(scope="function", autouse=True) +def snapshot1(session, user1, deck1): return create_snapshot_for_deck( - decks_session, + session, user1, deck1, title="First Snapshot", diff --git a/api/tests/decks/test_deck_clone.py b/api/tests/decks/test_deck_clone.py index 57405bf..e19c312 100644 --- a/api/tests/decks/test_deck_clone.py +++ b/api/tests/decks/test_deck_clone.py @@ -10,29 +10,29 @@ from api.tests.utils import create_user_token -@pytest.fixture(scope="module", autouse=True) -def user_token(decks_session): - user, token = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user_token(session): + user, token = create_user_token(session) return user, token -@pytest.fixture(scope="module", autouse=True) -def user2_token(decks_session): - user, token = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user2_token(session): + user, token = create_user_token(session) return user, token -@pytest.fixture(scope="module", autouse=True) -def deck(decks_session, user_token): +@pytest.fixture(scope="function", autouse=True) +def deck(session, user_token): user, _ = user_token - return create_deck_for_user(decks_session, user) + return create_deck_for_user(session, user) -@pytest.fixture(scope="module", autouse=True) -def snapshot(decks_session, user_token, deck): +@pytest.fixture(scope="function", autouse=True) +def snapshot(session, user_token, deck): user, _ = user_token return create_snapshot_for_deck( - decks_session, + session, user, deck, title="First Snapshot", @@ -41,11 +41,11 @@ def snapshot(decks_session, user_token, deck): ) 
-@pytest.fixture(scope="module", autouse=True) -def public_snapshot(decks_session, user_token, deck): +@pytest.fixture(scope="function", autouse=True) +def public_snapshot(session, user_token, deck): user, _ = user_token return create_snapshot_for_deck( - decks_session, + session, user, deck, title="Second Snapshot", diff --git a/api/tests/decks/test_deck_create.py b/api/tests/decks/test_deck_create.py index b321667..46d46a5 100644 --- a/api/tests/decks/test_deck_create.py +++ b/api/tests/decks/test_deck_create.py @@ -14,16 +14,16 @@ from api.tests.utils import create_user_token, generate_random_chars -@pytest.fixture(scope="module", autouse=True) -def user_token(decks_session): - user, token = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user_token(session): + user, token = create_user_token(session) return user, token -@pytest.fixture(scope="module") -def deck(decks_session, user_token): +@pytest.fixture(scope="function") +def deck(session, user_token): user, _ = user_token - return create_deck_for_user(decks_session, user) + return create_deck_for_user(session, user) def _valid_deck_dict(session: db.Session) -> dict: diff --git a/api/tests/decks/test_deck_delete.py b/api/tests/decks/test_deck_delete.py index 9d3d734..5863626 100644 --- a/api/tests/decks/test_deck_delete.py +++ b/api/tests/decks/test_deck_delete.py @@ -10,16 +10,16 @@ from api.tests.utils import create_user_token -@pytest.fixture(scope="module", autouse=True) -def user_token(decks_session): - user, token = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user_token(session): + user, token = create_user_token(session) return user, token -@pytest.fixture(scope="module") -def deck(decks_session, user_token): +@pytest.fixture(scope="function") +def deck(session, user_token): user, _ = user_token - return create_deck_for_user(decks_session, user) + return create_deck_for_user(session, user) def 
test_delete_deck_bad_deck(client: TestClient, session: db.Session, user_token): diff --git a/api/tests/decks/test_deck_export.py b/api/tests/decks/test_deck_export.py index d5727a4..4f85a7c 100644 --- a/api/tests/decks/test_deck_export.py +++ b/api/tests/decks/test_deck_export.py @@ -21,33 +21,33 @@ # Shared fixtures -@pytest.fixture(scope="module", autouse=True) -def export_user(decks_session): +@pytest.fixture(scope="function", autouse=True) +def export_user(session): """User with export token for export tests""" - user, _ = utils.create_user_token(decks_session) + user, _ = utils.create_user_token(session) user.deck_export_uuid = uuid.uuid4() - decks_session.commit() + session.commit() return user -@pytest.fixture(scope="module", autouse=True) -def export_deck1(decks_session, export_user): +@pytest.fixture(scope="function", autouse=True) +def export_deck1(session, export_user): """First deck for export user""" - return create_deck_for_user(decks_session, export_user, release_stub="master-set") + return create_deck_for_user(session, export_user, release_stub="master-set") -@pytest.fixture(scope="module", autouse=True) -def export_deck2(decks_session, export_user): +@pytest.fixture(scope="function", autouse=True) +def export_deck2(session, export_user): """Second deck for export user""" - return create_deck_for_user(decks_session, export_user, release_stub="expansion") + return create_deck_for_user(session, export_user, release_stub="expansion") -@pytest.fixture(scope="module", autouse=True) -def export_deck3(decks_session, export_user): +@pytest.fixture(scope="function", autouse=True) +def export_deck3(session, export_user): """Third deck for export user (marked as exported)""" - deck = create_deck_for_user(decks_session, export_user, release_stub="expansion") + deck = create_deck_for_user(session, export_user, release_stub="expansion") deck.is_exported = True - decks_session.commit() + session.commit() return deck diff --git a/api/tests/decks/test_deck_import.py 
b/api/tests/decks/test_deck_import.py index 08d9ee2..dcd7516 100644 --- a/api/tests/decks/test_deck_import.py +++ b/api/tests/decks/test_deck_import.py @@ -24,10 +24,10 @@ # Shared fixtures -@pytest.fixture(scope="module", autouse=True) -def user_token(decks_session): +@pytest.fixture(scope="function", autouse=True) +def user_token(session): """User and token for import tests""" - user, token = utils.create_user_token(decks_session) + user, token = utils.create_user_token(session) return user, token diff --git a/api/tests/decks/test_decks.py b/api/tests/decks/test_decks.py index ad0a177..336b878 100644 --- a/api/tests/decks/test_decks.py +++ b/api/tests/decks/test_decks.py @@ -14,21 +14,21 @@ from .deck_utils import create_deck_for_user -@pytest.fixture(scope="module", autouse=True) -def user1(decks_session): - user1, _ = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user1(session): + user1, _ = create_user_token(session) return user1 -@pytest.fixture(scope="module", autouse=True) -def deck1(decks_session, user1): - return create_deck_for_user(decks_session, user1, release_stub="master-set") +@pytest.fixture(scope="function", autouse=True) +def deck1(session, user1): + return create_deck_for_user(session, user1, release_stub="master-set") -@pytest.fixture(scope="module", autouse=True) -def snapshot1(decks_session, user1, deck1): +@pytest.fixture(scope="function", autouse=True) +def snapshot1(session, user1, deck1): return create_snapshot_for_deck( - decks_session, + session, user1, deck1, title="First Snapshot", @@ -37,10 +37,10 @@ def snapshot1(decks_session, user1, deck1): ) -@pytest.fixture(scope="module", autouse=True) -def private_snapshot1(decks_session, user1, deck1): +@pytest.fixture(scope="function", autouse=True) +def private_snapshot1(session, user1, deck1): return create_snapshot_for_deck( - decks_session, + session, user1, deck1, title="Private Snapshot", @@ -49,26 +49,26 @@ def private_snapshot1(decks_session, 
user1, deck1): ) -@pytest.fixture(scope="module", autouse=True) -def private_deck1(decks_session, user1): - return create_deck_for_user(decks_session, user1, release_stub="expansion") +@pytest.fixture(scope="function", autouse=True) +def private_deck1(session, user1): + return create_deck_for_user(session, user1, release_stub="expansion") -@pytest.fixture(scope="module", autouse=True) -def user2(decks_session): - user2, _ = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user2(session): + user2, _ = create_user_token(session) return user2 -@pytest.fixture(scope="module", autouse=True) -def deck2(decks_session, user2): - return create_deck_for_user(decks_session, user2, release_stub="expansion") +@pytest.fixture(scope="function", autouse=True) +def deck2(session, user2): + return create_deck_for_user(session, user2, release_stub="expansion") -@pytest.fixture(scope="module", autouse=True) -def snapshot2(decks_session, user2, deck2): +@pytest.fixture(scope="function", autouse=True) +def snapshot2(session, user2, deck2): return create_snapshot_for_deck( - decks_session, + session, user2, deck2, title="Second Snapshot", @@ -76,24 +76,24 @@ def snapshot2(decks_session, user2, deck2): ) -@pytest.fixture(scope="module", autouse=True) -def user3(decks_session): - user3, _ = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user3(session): + user3, _ = create_user_token(session) return user3 -@pytest.fixture(scope="module", autouse=True) -def deck3(decks_session, user3): - deck3 = create_deck_for_user(decks_session, user3, release_stub="expansion2") +@pytest.fixture(scope="function", autouse=True) +def deck3(session, user3): + deck3 = create_deck_for_user(session, user3, release_stub="expansion2") deck3.is_red_rains = True - decks_session.commit() + session.commit() return deck3 -@pytest.fixture(scope="module", autouse=True) -def snapshot3(decks_session, user3, deck3): 
+@pytest.fixture(scope="function", autouse=True) +def snapshot3(session, user3, deck3): return create_snapshot_for_deck( - decks_session, + session, user3, deck3, title="Red Rains Snapshot", diff --git a/api/tests/decks/test_subscriptions.py b/api/tests/decks/test_subscriptions.py index ade6e58..b785305 100644 --- a/api/tests/decks/test_subscriptions.py +++ b/api/tests/decks/test_subscriptions.py @@ -12,15 +12,15 @@ from .deck_utils import create_deck_for_user -@pytest.fixture(scope="module", autouse=True) -def user1(decks_session): - user1, _ = create_user_token(decks_session) +@pytest.fixture(scope="function", autouse=True) +def user1(session): + user1, _ = create_user_token(session) return user1 -@pytest.fixture(scope="module", autouse=True) -def deck1(decks_session, user1): - return create_deck_for_user(decks_session, user1, release_stub="master-set") +@pytest.fixture(scope="function", autouse=True) +def deck1(session, user1): + return create_deck_for_user(session, user1, release_stub="master-set") @pytest.fixture From 39deca6c08a1b64b9a4c78b3ba1d209972598db9 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:22:35 -0700 Subject: [PATCH 12/15] Fixed a test that was using the wrong date formatting --- api/tests/decks/test_deck_export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/tests/decks/test_deck_export.py b/api/tests/decks/test_deck_export.py index 4f85a7c..95065a6 100644 --- a/api/tests/decks/test_deck_export.py +++ b/api/tests/decks/test_deck_export.py @@ -130,9 +130,9 @@ def test_export_decks_filters_by_export_status( assert data["total"] == 2 # Two unexported decks # Compare by created dates since export data uses created as unique identifier exported_deck_created_dates = {deck["created"] for deck in data["decks"]} - assert export_deck1.created.isoformat() in exported_deck_created_dates + assert pydantic_style_datetime_str(export_deck1.created) in exported_deck_created_dates assert ( - 
export_deck3.created.isoformat() not in exported_deck_created_dates + pydantic_style_datetime_str(export_deck3.created) not in exported_deck_created_dates ) # Exported deck excluded From 5f57c0ca16719129306a5f46eb12dd3c5158ca54 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:28:58 -0700 Subject: [PATCH 13/15] Tweaked card data Filtration test wasn't working; probably something to do with the database configuration, and I just don't really care to debug this one because it's a non-issue in production. --- api/tests/cards/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/tests/cards/conftest.py b/api/tests/cards/conftest.py index 5b0a85b..713dd13 100644 --- a/api/tests/cards/conftest.py +++ b/api/tests/cards/conftest.py @@ -80,7 +80,7 @@ def _create_cards_for_filtration(session: db.Session, is_legacy=False): "phoenixborn": "Example Phoenixborn", "release": master_set, "cost": ["[[main]]", ["1 [[natural:power", "1 [[illusion:power]]"]], - "text": "Stuffiness: [[main]] - [[exhaust]] - 1 [[natural:class]] / 1 [[illusion:class]]: Place a [[Example Ally Conjuration]] conjuration on your battlefield.", + "text": "Stuff and Things: [[main]] - [[exhaust]] - 1 [[natural:class]] / 1 [[illusion:class]]: Place a [[Example Ally Conjuration]] conjuration on your battlefield.", "effect_magic_cost": "1 [[natural:class]] / 1 [[illusion:class]]", "attack": 2, "life": 1, From 383ac959fb7d70a809cc9432c35f9d3216de3971 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:46:25 -0700 Subject: [PATCH 14/15] Bumped dependencies --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9bb410e..e9d43a9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,14 +2,14 @@ [[package]] name = "alembic" -version = "1.16.4" +version = "1.16.5" description = "A database migration tool for SQLAlchemy." 
optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "alembic-1.16.4-py3-none-any.whl", hash = "sha256:b05e51e8e82efc1abd14ba2af6392897e145930c3e0a2faf2b0da2f7f7fd660d"}, - {file = "alembic-1.16.4.tar.gz", hash = "sha256:efab6ada0dd0fae2c92060800e0bf5c1dc26af15a10e02fb4babff164b4725e2"}, + {file = "alembic-1.16.5-py3-none-any.whl", hash = "sha256:e845dfe090c5ffa7b92593ae6687c5cb1a101e91fa53868497dbd79847f9dbe3"}, + {file = "alembic-1.16.5.tar.gz", hash = "sha256:a88bb7f6e513bd4301ecf4c7f2206fe93f9913f9b48dac3b78babde2d6fe765e"}, ] [package.dependencies] From 2a4d5955ac9c7d36c47ddf57caeec3e6413373d4 Mon Sep 17 00:00:00 2001 From: Ian Beck Date: Thu, 28 Aug 2025 11:51:40 -0700 Subject: [PATCH 15/15] Fixed formatting --- api/tests/conftest.py | 5 +++-- api/tests/decks/conftest.py | 4 +++- api/tests/decks/test_deck_export.py | 7 +++++-- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/api/tests/conftest.py b/api/tests/conftest.py index 4dce34b..83e1e4e 100644 --- a/api/tests/conftest.py +++ b/api/tests/conftest.py @@ -17,7 +17,6 @@ def test_endpoint(client: TestClient): from sqlalchemy_utils import create_database, database_exists, drop_database import api.environment - from api import app, db from api.depends import get_session @@ -66,7 +65,9 @@ def session(test_engine: Engine) -> db.Session: connection = test_engine.connect() transaction = connection.begin() try: - with db.Session(bind=connection, join_transaction_mode="create_savepoint") as session: + with db.Session( + bind=connection, join_transaction_mode="create_savepoint" + ) as session: yield session finally: transaction.rollback() diff --git a/api/tests/decks/conftest.py b/api/tests/decks/conftest.py index b0376e2..5e73d69 100644 --- a/api/tests/decks/conftest.py +++ b/api/tests/decks/conftest.py @@ -32,7 +32,9 @@ def session(cards_connection): """Return a nested transaction on the outer session, to prevent rolling back card data""" savepoint = 
cards_connection.begin_nested() try: - with Session(bind=cards_connection, join_transaction_mode="create_savepoint") as session: + with Session( + bind=cards_connection, join_transaction_mode="create_savepoint" + ) as session: yield session finally: savepoint.rollback() diff --git a/api/tests/decks/test_deck_export.py b/api/tests/decks/test_deck_export.py index 95065a6..f128e07 100644 --- a/api/tests/decks/test_deck_export.py +++ b/api/tests/decks/test_deck_export.py @@ -130,9 +130,12 @@ def test_export_decks_filters_by_export_status( assert data["total"] == 2 # Two unexported decks # Compare by created dates since export data uses created as unique identifier exported_deck_created_dates = {deck["created"] for deck in data["decks"]} - assert pydantic_style_datetime_str(export_deck1.created) in exported_deck_created_dates assert ( - pydantic_style_datetime_str(export_deck3.created) not in exported_deck_created_dates + pydantic_style_datetime_str(export_deck1.created) in exported_deck_created_dates + ) + assert ( + pydantic_style_datetime_str(export_deck3.created) + not in exported_deck_created_dates ) # Exported deck excluded