From e6cb1969736378e214a772d3184e66ecdd687060 Mon Sep 17 00:00:00 2001 From: dereknorrbom Date: Mon, 13 Apr 2026 15:22:55 -0700 Subject: [PATCH 1/2] feat: add pytest-bdd and initial BDD scenario for scan modes - Add pytest-bdd>=7.0 to dev dependencies - Create tests/features/ and tests/steps/ directory structure - Add scan_modes.feature with 3 scenarios covering default and standard-findings modes - Document BDD workflow and directory layout in AGENTS.md --- AGENTS.md | 45 ++++++- poetry.lock | 200 +++++++++++++++++++++++++++++- pyproject.toml | 1 + tests/features/scan_modes.feature | 21 ++++ tests/steps/test_scan_modes.py | 121 ++++++++++++++++++ 5 files changed, 385 insertions(+), 3 deletions(-) create mode 100644 tests/features/scan_modes.feature create mode 100644 tests/steps/test_scan_modes.py diff --git a/AGENTS.md b/AGENTS.md index ff17d8f..c772583 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -64,11 +64,54 @@ Before pushing a branch or marking a PR ready for review, run the following loca ```sh make fix # auto-format (black + ruff --fix) make check # verify fmt + lint are clean (CI-equivalent) -make test # all tests must pass +make test # all tests must pass (unit + BDD) ``` CI runs the same checks. A PR with a failing lint or test step will not be merged. +## Behavior-Driven Development (BDD) + +This project uses [pytest-bdd](https://pytest-bdd.readthedocs.io/) to make behavior specifications executable. BDD is mandatory for all new user-facing behavior, following the outside-in process prescribed in `CONTRIBUTING_AGENT.md`. + +### Directory layout + +``` +tests/ + features/ # Gherkin .feature files — one file per feature area + steps/ # Step definition files — test_.py per feature file +``` + +### The process (per CONTRIBUTING_AGENT.md) + +1. Write the Gherkin scenario in a `.feature` file before any production code +2. Run `make test` — confirm the scenario is collected and **fails** +3. 
Write the minimum step definitions and production code to make it pass +4. Refactor while all scenarios stay green + +### Writing scenarios + +- Feature files live in `tests/features/.feature` +- Step definitions live in `tests/steps/test_.py` +- Each step file must call `scenarios("../features/.feature")` to register all scenarios +- Use `parsers.parse(...)` for steps with quoted parameters, e.g.: + +```python +@then(parsers.parse('the report is named "{name}"')) +def report_named(ctx, name): ... +``` + +- Steps are shared across scenarios via a `ctx` fixture (a plain dict) rather than module-level state +- BDD scenarios test observable behavior (CLI output, file names, command arguments); they do not test internal implementation details + +### Running BDD tests + +```sh +make test # runs everything including BDD +poetry run pytest tests/steps/ -v # BDD only +``` + +All scenarios must be green before a PR is opened. + ## Commands ```sh diff --git a/poetry.lock b/poetry.lock index 3f36c03..f1a28c0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -222,6 +222,18 @@ typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""} [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "gherkin-official" +version = "29.0.0" +description = "Gherkin parser (official, by Cucumber team)" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "gherkin_official-29.0.0-py3-none-any.whl", hash = "sha256:26967b0d537a302119066742669e0e8b663e632769330be675457ae993e1d1bc"}, + {file = "gherkin_official-29.0.0.tar.gz", hash = "sha256:dbea32561158f02280d7579d179b019160d072ce083197625e2f80a6776bb9eb"}, +] + [[package]] name = "iniconfig" version = "2.3.0" @@ -234,6 +246,125 @@ files = [ {file = "iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730"}, ] +[[package]] +name = "mako" +version = "1.3.10" +description = "A super-fast templating language that borrows the best ideas from the 
existing templating languages." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59"}, + {file = "mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28"}, +] + +[package.dependencies] +MarkupSafe = ">=0.9.2" + +[package.extras] +babel = ["Babel"] +lingua = ["lingua"] +testing = ["pytest"] + +[[package]] +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = 
"markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = 
"markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = 
"markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = 
"markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = 
"markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = 
"sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, +] + [[package]] name = "mypy-extensions" version = "1.1.0" @@ -258,6 +389,39 @@ files = [ {file = "packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4"}, ] +[[package]] +name = "parse" +version = "1.21.1" +description = "parse() is the opposite of format()" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "parse-1.21.1-py2.py3-none-any.whl", hash = "sha256:55339ca698019815df3b8e8b550e5933933527e623b0cdf1ca2f404da35ffb47"}, + {file = "parse-1.21.1.tar.gz", hash = "sha256:825e1a88e9d9fb481b8d2ca709c6195558b6eaa97c559ad3a9a20aa2d12815a3"}, +] + +[[package]] +name = "parse-type" +version = "0.6.6" +description = "Simplifies to build parse types based on the parse module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,>=2.7" +groups = ["dev"] +files = [ + {file = "parse_type-0.6.6-py2.py3-none-any.whl", hash = "sha256:3ca79bbe71e170dfccc8ec6c341edfd1c2a0fc1e5cfd18330f93af938de2348c"}, + {file = "parse_type-0.6.6.tar.gz", hash = 
"sha256:513a3784104839770d690e04339a8b4d33439fcd5dd99f2e4580f9fc1097bfb2"}, +] + +[package.dependencies] +parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""} +six = ">=1.15" + +[package.extras] +develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"] +docs = ["Sphinx (>=1.6)", "sphinx_bootstrap_theme (>=0.6.0)"] +testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"] + [[package]] name = "pathspec" version = "1.0.4" @@ -343,6 +507,27 @@ tomli = {version = ">=1", markers = "python_version < \"3.11\""} [package.extras] dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-bdd" +version = "8.1.0" +description = "BDD for pytest" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_bdd-8.1.0-py3-none-any.whl", hash = "sha256:2124051e71a05ad7db15296e39013593f72ebf96796e1b023a40e5453c47e5fb"}, + {file = "pytest_bdd-8.1.0.tar.gz", hash = "sha256:ef0896c5cd58816dc49810e8ff1d632f4a12019fb3e49959b2d349ffc1c9bfb5"}, +] + +[package.dependencies] +gherkin-official = ">=29.0.0,<30.0.0" +Mako = "*" +packaging = "*" +parse = "*" +parse-type = "*" +pytest = ">=7.0.0" +typing-extensions = "*" + [[package]] name = "pytest-cov" version = "7.0.0" @@ -446,6 +631,18 @@ files = [ {file = "ruff-0.15.7.tar.gz", hash = "sha256:04f1ae61fc20fe0b148617c324d9d009b5f63412c0b16474f3d5f1a1a665f7ac"}, ] +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["dev"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + [[package]] name = "tomli" version = "2.4.0" @@ -511,7 +708,6 @@ description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" groups = ["dev"] -markers = "python_version == \"3.10\"" files = [ {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, @@ -520,4 +716,4 @@ files = [ [metadata] lock-version = "2.1" python-versions = ">=3.10" -content-hash = "e5fcae5671f32b0d2054dcd4dc0113d25cbb947ee8bcd9db35cf6cb3057dcee3" +content-hash = "d376cf54b5e48675a83d516fb13e31ddfa8ae0d6502c9b2a1fdf3a1d6bb8f750" diff --git a/pyproject.toml b/pyproject.toml index 61e52fa..07814a7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ packages = [{ include = "run_codeql" }] [tool.poetry.group.dev.dependencies] pytest = ">=8.0" +pytest-bdd = ">=7.0" pytest-cov = ">=6.0" black = ">=25.0" ruff = ">=0.9" diff --git a/tests/features/scan_modes.feature b/tests/features/scan_modes.feature new file mode 100644 index 0000000..a312d30 --- /dev/null +++ b/tests/features/scan_modes.feature @@ -0,0 +1,21 @@ +Feature: Scan modes + As an AI agent running CodeQL locally, + I want to control which query suite is used for a scan, + So that I can get consistent, reproducible findings regardless of repo config. 
+ + Scenario: Default mode uses repo config query selector + Given a repo config that selects the "code-quality" query suite + When I run a Python scan in default mode + Then the SARIF report is named "python-code-quality.sarif" + + Scenario: standard-findings mode ignores repo config and forces code-quality + Given a repo config that selects the "security-and-quality" query suite + When I run a Python scan in standard-findings mode + Then the SARIF report is named "python-code-quality.sarif" + And the database create command does not include "--codescanning-config" + And the analyze command uses suite "codeql/python-queries:codeql-suites/python-code-quality.qls" + + Scenario: Default mode resolves security-and-quality suite when no config exists + Given no repo config file exists + When I run a Python scan in default mode + Then the analyze command uses suite "codeql/python-queries:codeql-suites/python-security-and-quality.qls" diff --git a/tests/steps/test_scan_modes.py b/tests/steps/test_scan_modes.py new file mode 100644 index 0000000..4a2ef71 --- /dev/null +++ b/tests/steps/test_scan_modes.py @@ -0,0 +1,121 @@ +import pytest +from pytest_bdd import given, parsers, scenarios, then, when + +import run_codeql.scanner as scanner + +scenarios("../features/scan_modes.feature") + + +# ── shared fixtures ─────────────────────────────────────────────────────────── + + +@pytest.fixture() +def scan_ctx(tmp_path, monkeypatch): + """Shared mutable context passed between steps.""" + repo_root = tmp_path / "repo" + work_dir = tmp_path / "work" + report_dir = tmp_path / "reports" + repo_root.mkdir() + work_dir.mkdir() + report_dir.mkdir() + + codeql = tmp_path / "codeql" + codeql.write_text("", encoding="utf-8") + + create_commands: list[list[str]] = [] + analyze_commands: list[list[str]] = [] + + def fake_run(cmd, check, stdout=None, stderr=None): # noqa: ANN001 + if len(cmd) > 2 and cmd[2] == "create": + create_commands.append(cmd) + if len(cmd) > 2 and cmd[2] == "analyze": 
+ analyze_commands.append(cmd) + return None + + monkeypatch.setattr(scanner.subprocess, "run", fake_run) + monkeypatch.setattr(scanner, "ensure_pack", lambda pack_name, codeql, quiet: None) + + return { + "tmp_path": tmp_path, + "repo_root": repo_root, + "work_dir": work_dir, + "report_dir": report_dir, + "codeql": codeql, + "config_file": tmp_path / "missing.yml", + "mode": "default", + "create_commands": create_commands, + "analyze_commands": analyze_commands, + "sarif_path": None, + } + + +# ── given ───────────────────────────────────────────────────────────────────── + + +@given('a repo config that selects the "code-quality" query suite') +def repo_config_code_quality(scan_ctx): + config_file = scan_ctx["tmp_path"] / "codeql-config.yml" + config_file.write_text("queries:\n - uses: code-quality\n", encoding="utf-8") + scan_ctx["config_file"] = config_file + + +@given('a repo config that selects the "security-and-quality" query suite') +def repo_config_security_and_quality(scan_ctx): + config_file = scan_ctx["tmp_path"] / "codeql-config.yml" + config_file.write_text("queries:\n - uses: security-and-quality\n", encoding="utf-8") + scan_ctx["config_file"] = config_file + + +@given("no repo config file exists") +def no_repo_config(scan_ctx): + scan_ctx["config_file"] = scan_ctx["tmp_path"] / "missing.yml" + + +# ── when ────────────────────────────────────────────────────────────────────── + + +@when("I run a Python scan in default mode") +def run_python_default(scan_ctx): + scan_ctx["sarif_path"] = scanner.run_lang( + lang="python", + codeql=scan_ctx["codeql"], + keep_db=True, + repo_root=scan_ctx["repo_root"], + work_dir=scan_ctx["work_dir"], + report_dir=scan_ctx["report_dir"], + config_file=scan_ctx["config_file"], + ) + + +@when("I run a Python scan in standard-findings mode") +def run_python_standard_findings(scan_ctx): + scan_ctx["sarif_path"] = scanner.run_lang( + lang="python", + codeql=scan_ctx["codeql"], + keep_db=True, + 
repo_root=scan_ctx["repo_root"], + work_dir=scan_ctx["work_dir"], + report_dir=scan_ctx["report_dir"], + config_file=scan_ctx["config_file"], + mode="standard-findings", + ) + + +# ── then ────────────────────────────────────────────────────────────────────── + + +@then('the SARIF report is named "python-code-quality.sarif"') +def sarif_named_code_quality(scan_ctx): + assert scan_ctx["sarif_path"].name == "python-code-quality.sarif" + + +@then('the database create command does not include "--codescanning-config"') +def create_excludes_codescanning_config(scan_ctx): + assert scan_ctx["create_commands"], "No database create command was recorded" + assert "--codescanning-config" not in scan_ctx["create_commands"][0] + + +@then(parsers.parse('the analyze command uses suite "{suite}"')) +def analyze_uses_suite(scan_ctx, suite): + assert scan_ctx["analyze_commands"], "No analyze command was recorded" + assert suite in scan_ctx["analyze_commands"][0] From 82bdf8ccaa3c92247865d14ac0fab2661374e9af Mon Sep 17 00:00:00 2001 From: dereknorrbom Date: Mon, 13 Apr 2026 15:30:39 -0700 Subject: [PATCH 2/2] feat: add BDD scenario coverage for CLI output, filtering, language detection, and repo config - Add tests/conftest.py with shared helpers and cli_ctx fixture - Add cli_output.feature: exit codes, verbose/quiet output, lang filter, missing SARIF - Add findings_filtering.feature: --files, --rule, --limit, --offset, third-party excludes - Add language_detection.feature: auto-detect, ignore dirs, Actions, sort order - Add repo_config.feature: config loading, CLI override precedence, disable config - Add corresponding step definitions in tests/steps/ - 172 tests passing (129 unit + 43 BDD) --- tests/conftest.py | 79 ++++++++++++++++++++++ tests/features/cli_output.feature | 57 ++++++++++++++++ tests/features/findings_filtering.feature | 81 +++++++++++++++++++++++ tests/features/language_detection.feature | 61 +++++++++++++++++ tests/features/repo_config.feature | 44 ++++++++++++ 
tests/steps/test_cli_output.py | 70 ++++++++++++++++++++ tests/steps/test_findings_filtering.py | 39 +++++++++++ tests/steps/test_language_detection.py | 58 ++++++++++++++++ tests/steps/test_repo_config.py | 64 ++++++++++++++++++ 9 files changed, 553 insertions(+) create mode 100644 tests/conftest.py create mode 100644 tests/features/cli_output.feature create mode 100644 tests/features/findings_filtering.feature create mode 100644 tests/features/language_detection.feature create mode 100644 tests/features/repo_config.feature create mode 100644 tests/steps/test_cli_output.py create mode 100644 tests/steps/test_findings_filtering.py create mode 100644 tests/steps/test_language_detection.py create mode 100644 tests/steps/test_repo_config.py diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..f31b481 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,79 @@ +"""Shared pytest fixtures and helpers for both unit and BDD tests.""" + +import json +import shutil +import subprocess +import sys +from pathlib import Path + +import pytest + +FIXTURES = Path(__file__).parent / "fixtures" + + +def run_rcql(args: list[str], cwd: Path) -> subprocess.CompletedProcess: + return subprocess.run( + [sys.executable, "-m", "run_codeql"] + args, + cwd=cwd, + capture_output=True, + text=True, + ) + + +def make_report_dir(tmp_path: Path, *sarif_names: str) -> Path: + report_dir = tmp_path / ".codeql" / "reports" + report_dir.mkdir(parents=True, exist_ok=True) + for name in sarif_names: + shutil.copy(FIXTURES / name, report_dir / name) + return report_dir + + +def write_sarif_with_paths(tmp_path: Path, paths: list[str], lang: str = "python") -> None: + report_dir = tmp_path / ".codeql" / "reports" + report_dir.mkdir(parents=True, exist_ok=True) + sarif = { + "runs": [ + { + "tool": { + "driver": { + "rules": [ + { + "id": "py/unused-import", + "shortDescription": {"text": "Unused import"}, + } + ] + } + }, + "results": [ + { + "ruleId": "py/unused-import", + 
"level": "warning", + "message": {"text": f"finding-{idx}"}, + "locations": [ + { + "physicalLocation": { + "artifactLocation": {"uri": uri}, + "region": {"startLine": idx + 1}, + } + } + ], + } + for idx, uri in enumerate(paths) + ], + } + ] + } + (report_dir / f"{lang}-code-quality.sarif").write_text(json.dumps(sarif), encoding="utf-8") + + +def write_repo_config(tmp_path: Path, payload: dict) -> None: + (tmp_path / ".rcql.json").write_text(json.dumps(payload), encoding="utf-8") + + +@pytest.fixture() +def cli_ctx(tmp_path): + """Shared mutable context used by BDD CLI step definitions.""" + return { + "tmp_path": tmp_path, + "result": None, + } diff --git a/tests/features/cli_output.feature b/tests/features/cli_output.feature new file mode 100644 index 0000000..a929e45 --- /dev/null +++ b/tests/features/cli_output.feature @@ -0,0 +1,57 @@ +Feature: CLI output modes + As an AI agent consuming rcql output, + I want predictable stdout/stderr output and exit codes, + So that I can reliably parse results and decide next steps. 
+ + Scenario: Report-only with findings exits non-zero + Given a Python SARIF report with findings exists + When I run rcql with "--report-only" + Then the exit code is non-zero + + Scenario: Report-only with no findings exits zero + Given an empty Python SARIF report exists + When I run rcql with "--report-only" + Then the exit code is zero + + Scenario: --no-fail forces exit zero even with findings + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail" + Then the exit code is zero + + Scenario: Report-only output includes language block + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail" + Then stdout contains "[python]" + + Scenario: Report-only output includes finding count + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail" + Then stdout contains "Total: 3" + + Scenario: Verbose mode includes rule IDs + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --verbose --no-fail" + Then stdout contains "py/sql-injection" + + Scenario: Quiet mode suppresses log lines from stdout + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --quiet --no-fail" + Then stdout does not contain "[codeql-local]" + + Scenario: Quiet mode prints mode message to stderr + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --quiet --no-fail" + Then stderr contains "quiet mode" + + Scenario: Missing SARIF reports exit non-zero with helpful message + Given no SARIF reports exist + When I run rcql with "--report-only" + Then the exit code is non-zero + And stderr contains "No SARIF files found" + + Scenario: --lang filter shows only requested language + Given a Python SARIF report with findings exists + And an empty Rust SARIF report exists + When I run rcql with "--report-only --lang=python --no-fail" + Then stdout contains "[python]" + And 
stdout does not contain "[rust]" diff --git a/tests/features/findings_filtering.feature b/tests/features/findings_filtering.feature new file mode 100644 index 0000000..5e81586 --- /dev/null +++ b/tests/features/findings_filtering.feature @@ -0,0 +1,81 @@ +Feature: Findings filtering + As an AI agent using rcql to investigate specific files or rules, + I want to filter findings by file path, rule ID, and pagination, + So that I can retrieve exactly the findings relevant to my current task. + + Scenario: --files filters to matching path + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --files src/db.py" + Then stdout contains "Shown: 1" + And stdout contains "matched: 1" + + Scenario: --files with glob matches multiple paths + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --files src/*.py" + Then stdout contains "Shown: 3" + + Scenario: --files with no match suppresses language block + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --files nonexistent.py" + Then stdout does not contain "[python]" + + Scenario: --rule filters to matching rule ID + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --rule py/unused-import" + Then stdout contains "Shown: 2" + And stdout contains "matched: 2" + + Scenario: --rule with glob matches all rules for a language + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --rule py/*" + Then stdout contains "Shown: 3" + + Scenario: --rule with no match suppresses language block + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --rule js/something" + Then stdout does not contain "[python]" + + Scenario: --files and --rule combined filter findings + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail 
--files src/utils.py --rule py/unused-import" + Then stdout contains "Shown: 2" + + Scenario: --limit caps the number of shown findings + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --limit 1" + Then stdout contains "Shown: 1" + And stdout contains "matched: 3" + + Scenario: --offset skips leading findings + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --offset 2" + Then stdout contains "Shown: 1" + And stdout contains "matched: 3" + + Scenario: Pagination - page one + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --limit 2 --offset 0" + Then stdout contains "Shown: 2" + And stdout contains "matched: 3" + + Scenario: Pagination - page two + Given a Python SARIF report with findings exists + When I run rcql with "--report-only --no-fail --limit 2 --offset 2" + Then stdout contains "Shown: 1" + And stdout contains "matched: 3" + + Scenario: Default excludes suppress node_modules findings + Given a SARIF report exists with findings in "src/app.py" and "app/node_modules/pkg/index.py" + When I run rcql with "--report-only --no-fail" + Then stdout contains "Total: 1" + And stdout does not contain "node_modules" + + Scenario: --include-third-party restores suppressed paths + Given a SARIF report exists with findings in "src/app.py" and "node_modules/pkg/index.py" + When I run rcql with "--report-only --no-fail --include-third-party" + Then stdout contains "Total: 2" + + Scenario: --exclude-files hides matching paths + Given a SARIF report exists with findings in "src/app.py" and "src/generated/foo.py" + When I run rcql with "--report-only --no-fail --exclude-files src/generated/**" + Then stdout contains "Total: 1" diff --git a/tests/features/language_detection.feature b/tests/features/language_detection.feature new file mode 100644 index 0000000..eea9b5d --- /dev/null +++ b/tests/features/language_detection.feature @@ 
-0,0 +1,61 @@ +Feature: Language detection + As an AI agent running rcql on a repository, + I want rcql to automatically detect which languages are present, + So that I don't need to know the repo's tech stack in advance. + + Scenario: Detects Python from .py files + Given the repo contains "src/main.py" + When I run language detection + Then the detected languages include "python" + + Scenario: Detects Rust from .rs files + Given the repo contains "src/main.rs" + When I run language detection + Then the detected languages include "rust" + + Scenario: Detects JavaScript/TypeScript from .ts files + Given the repo contains "src/index.ts" + When I run language detection + Then the detected languages include "javascript-typescript" + + Scenario: Detects multiple languages + Given the repo contains "app.py" + And the repo contains "main.rs" + When I run language detection + Then the detected languages include "python" + And the detected languages include "rust" + + Scenario: Detects GitHub Actions from workflow files + Given the repo contains ".github/workflows/ci.yml" + When I run language detection + Then the detected languages include "actions" + + Scenario: Does not detect Actions without workflow directory + Given the repo contains "src/app.py" + When I run language detection + Then the detected languages do not include "actions" + + Scenario: Ignores files inside node_modules + Given the repo contains "node_modules/lib/index.js" + And the repo contains "src/app.py" + When I run language detection + Then the detected languages include "python" + And the detected languages do not include "javascript-typescript" + + Scenario: Empty repo detects no languages + Given the repo is empty + When I run language detection + Then no languages are detected + + Scenario: Unknown file extensions are ignored + Given the repo contains "README.md" + And the repo contains "data.csv" + When I run language detection + Then no languages are detected + + Scenario: Detection results are 
sorted alphabetically + Given the repo contains "a.rs" + And the repo contains "b.py" + And the repo contains "c.go" + When I run language detection + Then the detected languages are sorted alphabetically diff --git a/tests/features/repo_config.feature b/tests/features/repo_config.feature new file mode 100644 index 0000000..e648f6d --- /dev/null +++ b/tests/features/repo_config.feature @@ -0,0 +1,44 @@ +Feature: Repository configuration + As an AI agent working in a repo with a .rcql.json config, + I want rcql to honour the repo config as a baseline + and let CLI flags override it, + So that repo-level defaults don't require flags on every invocation. + + Scenario: Repo config files filter is applied automatically + Given a Python SARIF report with findings exists + And the repo config sets files to "src/utils.py" + When I run rcql with "--report-only --no-fail" + Then stdout contains "Shown: 2" + And stdout contains "matched: 2" + + Scenario: CLI --files overrides repo config files filter + Given a Python SARIF report with findings exists + And the repo config sets files to "src/db.py" + When I run rcql with "--report-only --no-fail --files src/utils.py" + Then stdout contains "Shown: 2" + And stdout contains "matched: 2" + + Scenario: Repo config exclude_files is applied automatically + Given a SARIF report exists with findings in "src/app.py" and "src/generated/foo.py" + And the repo config sets exclude_files to "src/generated/**" + When I run rcql with "--report-only --no-fail" + Then stdout contains "Total: 1" + + Scenario: Repo config include_third_party opt-in + Given a SARIF report exists with findings in "src/app.py" and "node_modules/pkg/index.py" + And the repo config sets include_third_party to true + When I run rcql with "--report-only --no-fail" + Then stdout contains "Total: 2" + + Scenario: Missing config file is silently ignored + Given no repo config file exists + And a Python SARIF report with findings exists + When I run rcql with "--report-only 
"""Step definitions for tests/features/cli_output.feature."""

import shlex
import shutil
from pathlib import Path

from pytest_bdd import given, parsers, scenarios, then, when

from tests.conftest import make_report_dir, run_rcql

scenarios("../features/cli_output.feature")

# Canned SARIF fixtures shipped with the test suite.
# (Was a needless __import__("pathlib") indirection; a plain import is clearer.)
FIXTURES = Path(__file__).parent.parent / "fixtures"


# ── given ─────────────────────────────────────────────────────────────────────


@given("a Python SARIF report with findings exists")
def python_sarif_with_findings(cli_ctx):
    """Copy the canned Python SARIF fixture (3 findings) into the repo."""
    make_report_dir(cli_ctx["tmp_path"], "python-code-quality.sarif")


@given("an empty Python SARIF report exists")
def empty_python_sarif(cli_ctx):
    """Copy the zero-findings SARIF fixture into the repo."""
    make_report_dir(cli_ctx["tmp_path"], "empty-code-quality.sarif")


@given("no SARIF reports exist")
def no_sarif_reports(cli_ctx):
    pass  # tmp_path starts empty


@given("an empty Rust SARIF report exists")
def empty_rust_sarif(cli_ctx):
    # The empty fixture is language-agnostic; copy it under the rust report name.
    report_dir = cli_ctx["tmp_path"] / ".codeql" / "reports"
    report_dir.mkdir(parents=True, exist_ok=True)
    shutil.copy(FIXTURES / "empty-code-quality.sarif", report_dir / "rust-code-quality.sarif")


# ── when ──────────────────────────────────────────────────────────────────────


@when(parsers.parse('I run rcql with "{args}"'))
def run_rcql_with_args(cli_ctx, args):
    # shlex.split honours shell-style quoting (e.g. --config ''), which a
    # plain str.split would pass through as two literal quote characters.
    cli_ctx["result"] = run_rcql(shlex.split(args), cli_ctx["tmp_path"])


# ── then ──────────────────────────────────────────────────────────────────────


@then("the exit code is zero")
def exit_code_zero(cli_ctx):
    assert cli_ctx["result"].returncode == 0


@then("the exit code is non-zero")
def exit_code_nonzero(cli_ctx):
    assert cli_ctx["result"].returncode != 0


@then(parsers.parse('stdout contains "{text}"'))
def stdout_contains(cli_ctx, text):
    assert text in cli_ctx["result"].stdout


@then(parsers.parse('stdout does not contain "{text}"'))
def stdout_not_contains(cli_ctx, text):
    assert text not in cli_ctx["result"].stdout


@then(parsers.parse('stderr contains "{text}"'))
def stderr_contains(cli_ctx, text):
    assert text in cli_ctx["result"].stderr
"""Step definitions for tests/features/findings_filtering.feature."""

import shlex

from pytest_bdd import given, parsers, scenarios, then, when

from tests.conftest import make_report_dir, run_rcql, write_sarif_with_paths

scenarios("../features/findings_filtering.feature")


# ── given ─────────────────────────────────────────────────────────────────────


@given("a Python SARIF report with findings exists")
def python_sarif_with_findings(cli_ctx):
    """Copy the canned Python SARIF fixture (3 findings) into the repo."""
    make_report_dir(cli_ctx["tmp_path"], "python-code-quality.sarif")


@given(parsers.parse('a SARIF report exists with findings in "{path_a}" and "{path_b}"'))
def sarif_with_two_paths(cli_ctx, path_a, path_b):
    """Synthesize a SARIF report with exactly one finding in each given path."""
    write_sarif_with_paths(cli_ctx["tmp_path"], [path_a, path_b])


# ── when ──────────────────────────────────────────────────────────────────────


@when(parsers.parse('I run rcql with "{args}"'))
def run_rcql_with_args(cli_ctx, args):
    # Tokenize like a shell (consistent with the other step modules) so
    # quoted arguments in scenario text survive intact; str.split would not.
    cli_ctx["result"] = run_rcql(shlex.split(args), cli_ctx["tmp_path"])


# ── then ──────────────────────────────────────────────────────────────────────


@then(parsers.parse('stdout contains "{text}"'))
def stdout_contains(cli_ctx, text):
    assert text in cli_ctx["result"].stdout


@then(parsers.parse('stdout does not contain "{text}"'))
def stdout_not_contains(cli_ctx, text):
    assert text not in cli_ctx["result"].stdout
"""Step definitions for tests/features/language_detection.feature."""

from pathlib import Path

from pytest_bdd import given, parsers, scenarios, then, when

from run_codeql.scanner import detect_langs

scenarios("../features/language_detection.feature")


def _touch(tmp_path: Path, rel_path: str) -> None:
    """Create an empty file at *rel_path*, making parent directories as needed."""
    target = tmp_path / rel_path
    target.parent.mkdir(parents=True, exist_ok=True)
    target.touch()


# ── given ─────────────────────────────────────────────────────────────────────


@given(parsers.parse('the repo contains "{rel_path}"'))
def repo_contains_file(cli_ctx, rel_path):
    """Place an empty file at the given relative path inside the fake repo."""
    _touch(cli_ctx["tmp_path"], rel_path)


@given("the repo is empty")
def repo_is_empty(cli_ctx):
    pass  # nothing to create; tmp_path begins empty


# ── when ──────────────────────────────────────────────────────────────────────


@when("I run language detection")
def run_language_detection(cli_ctx):
    """Run detection over the fake repo and stash the result in the context."""
    cli_ctx["detected_langs"] = detect_langs(cli_ctx["tmp_path"])


# ── then ──────────────────────────────────────────────────────────────────────


@then(parsers.parse('the detected languages include "{lang}"'))
def detected_includes(cli_ctx, lang):
    detected = cli_ctx["detected_langs"]
    assert lang in detected


@then(parsers.parse('the detected languages do not include "{lang}"'))
def detected_excludes(cli_ctx, lang):
    detected = cli_ctx["detected_langs"]
    assert lang not in detected


@then("no languages are detected")
def no_languages_detected(cli_ctx):
    detected = cli_ctx["detected_langs"]
    assert detected == []


@then("the detected languages are sorted alphabetically")
def langs_are_sorted(cli_ctx):
    observed = cli_ctx["detected_langs"]
    assert observed == sorted(observed)
"""Step definitions for tests/features/repo_config.feature."""

import shlex

from pytest_bdd import given, parsers, scenarios, then, when

from tests.conftest import make_report_dir, run_rcql, write_repo_config, write_sarif_with_paths

scenarios("../features/repo_config.feature")


# ── given ─────────────────────────────────────────────────────────────────────


@given("a Python SARIF report with findings exists")
def python_sarif_with_findings(cli_ctx):
    """Copy the canned Python SARIF fixture (3 findings) into the repo."""
    make_report_dir(cli_ctx["tmp_path"], "python-code-quality.sarif")


@given(parsers.parse('a SARIF report exists with findings in "{path_a}" and "{path_b}"'))
def sarif_with_two_paths(cli_ctx, path_a, path_b):
    """Synthesize a SARIF report with exactly one finding in each given path."""
    write_sarif_with_paths(cli_ctx["tmp_path"], [path_a, path_b])


@given(parsers.parse('the repo config sets files to "{pattern}"'))
def repo_config_files(cli_ctx, pattern):
    write_repo_config(cli_ctx["tmp_path"], {"files": [pattern]})


@given(parsers.parse('the repo config sets exclude_files to "{pattern}"'))
def repo_config_exclude_files(cli_ctx, pattern):
    write_repo_config(cli_ctx["tmp_path"], {"exclude_files": [pattern]})


@given("the repo config sets include_third_party to true")
def repo_config_include_third_party(cli_ctx):
    write_repo_config(cli_ctx["tmp_path"], {"include_third_party": True})


@given("no repo config file exists")
def no_repo_config(cli_ctx):
    pass  # tmp_path starts without .rcql.json


# ── when ──────────────────────────────────────────────────────────────────────


@when(parsers.parse('I run rcql with "{args}"'))
def run_rcql_with_args(cli_ctx, args):
    # BUG FIX: the "--config ''" scenario needs shell-style tokenization.
    # str.split() passed the two quote characters through literally, so the
    # CLI received "''" rather than an empty string and the scenario did not
    # exercise the disable-config path it claims to.
    cli_ctx["result"] = run_rcql(shlex.split(args), cli_ctx["tmp_path"])


# ── then ──────────────────────────────────────────────────────────────────────


@then("the exit code is zero")
def exit_code_zero(cli_ctx):
    assert cli_ctx["result"].returncode == 0


@then(parsers.parse('stdout contains "{text}"'))
def stdout_contains(cli_ctx, text):
    assert text in cli_ctx["result"].stdout


@then(parsers.parse('stdout does not contain "{text}"'))
def stdout_not_contains(cli_ctx, text):
    assert text not in cli_ctx["result"].stdout