diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
new file mode 100644
index 00000000..23f656af
--- /dev/null
+++ b/.github/workflows/docs.yml
@@ -0,0 +1,50 @@
+name: Deploy docs site
+
+# Builds the Astro Starlight site under docs-site/ and publishes it to GitHub Pages.
+# Requires "Settings → Pages → Source: GitHub Actions" to be enabled on the repo.
+
+on:
+  push:
+    branches: [main, prototype-docs-site]
+    paths:
+      - 'docs-site/**'
+      - '.github/workflows/docs.yml'
+  workflow_dispatch:
+
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+concurrency:
+  group: pages
+  cancel-in-progress: false
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: docs-site
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-node@v4
+        with:
+          node-version: 20
+          cache: npm
+          cache-dependency-path: docs-site/package-lock.json
+      - run: npm ci
+      - run: npm run build
+      - uses: actions/upload-pages-artifact@v3
+        with:
+          path: docs-site/dist
+
+  deploy:
+    needs: build
+    runs-on: ubuntu-latest
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    steps:
+      - id: deployment
+        uses: actions/deploy-pages@v4
diff --git a/docs-site/.gitignore b/docs-site/.gitignore
new file mode 100644
index 00000000..2c5d6f49
--- /dev/null
+++ b/docs-site/.gitignore
@@ -0,0 +1,5 @@
+node_modules/
+dist/
+.astro/
+.env
+.DS_Store
diff --git a/docs-site/astro.config.mjs b/docs-site/astro.config.mjs
new file mode 100644
index 00000000..8a46f0f3
--- /dev/null
+++ b/docs-site/astro.config.mjs
@@ -0,0 +1,102 @@
+import { defineConfig } from 'astro/config';
+import starlight from '@astrojs/starlight';
+import react from '@astrojs/react';
+
+// `site` and `base` together determine the public URL.
+// For the upstream awslabs project page: site='https://awslabs.github.io', base='/graphrag-toolkit'
+// For a fork's project page: site='https://<username>.github.io', base='/<repo-name>'
+export default defineConfig({
+  site: 'https://oussamahansal.github.io',
+  base: '/graphrag-toolkit',
+  integrations: [
+    react(),
+    starlight({
+      title: 'GraphRAG Toolkit',
+      description:
+        'Documentation for the AWS GraphRAG Toolkit — lexical-graph and BYOKG-RAG.',
+      logo: { src: './src/assets/logo.svg' },
+      customCss: ['./src/styles/custom.css'],
+      social: {
+        github: 'https://github.com/awslabs/graphrag-toolkit',
+      },
+      head: [
+        {
+          tag: 'link',
+          attrs: {
+            rel: 'stylesheet',
+            href: 'https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&family=JetBrains+Mono:wght@400;500&display=swap',
+          },
+        },
+      ],
+      sidebar: [
+        {
+          label: 'Lexical Graph',
+          items: [
+            { label: 'Overview', slug: 'lexical-graph/overview' },
+            { label: 'Graph Model', slug: 'lexical-graph/graph-model' },
+            { label: 'Storage Model', slug: 'lexical-graph/storage-model' },
+            {
+              label: 'Indexing',
+              items: [
+                { label: 'Indexing', slug: 'lexical-graph/indexing' },
+                { label: 'Batch Extraction', slug: 'lexical-graph/batch-extraction' },
+                { label: 'Configuring Batch Extraction', slug: 'lexical-graph/configuring-batch-extraction' },
+                { label: 'Versioned Updates', slug: 'lexical-graph/versioned-updates' },
+                { label: 'Metadata Filtering', slug: 'lexical-graph/metadata-filtering' },
+                { label: 'Reader Providers', slug: 'lexical-graph/readers' },
+                { label: 'External Properties', slug: 'lexical-graph/external-properties' },
+              ],
+            },
+            {
+              label: 'Querying',
+              items: [
+                { label: 'Querying', slug: 'lexical-graph/querying' },
+                { label: 'Traversal-Based Search', slug: 'lexical-graph/traversal-based-search' },
+                { label: 'Traversal-Based Search Configuration', slug: 'lexical-graph/traversal-based-search-configuration' },
+                { label: 'Semantic-Guided Search', slug: 'lexical-graph/semantic-guided-search' },
+              ],
+            },
+            {
+              label: 'Graph Stores',
+              items: [
+                { label: 'Neptune Analytics', slug: 'lexical-graph/graph-store-neptune-analytics' },
+                { label: 'Neptune Database', slug: 'lexical-graph/graph-store-neptune-db' },
+                { label: 'Neo4j', slug: 'lexical-graph/graph-store-neo4j' },
+                { label: 'FalkorDB', slug: 'lexical-graph/graph-store-falkor-db' },
+              ],
+            },
+            {
+              label: 'Vector Stores',
+              items: [
+                { label: 'Neptune Analytics', slug: 'lexical-graph/vector-store-neptune-analytics' },
+                { label: 'OpenSearch Serverless', slug: 'lexical-graph/vector-store-opensearch-serverless' },
+                { label: 'Postgres', slug: 'lexical-graph/vector-store-postgres' },
+                { label: 'S3 Vectors', slug: 'lexical-graph/vector-store-s3-vectors' },
+              ],
+            },
+            { label: 'Configuration', slug: 'lexical-graph/configuration' },
+            { label: 'Multi-Tenancy', slug: 'lexical-graph/multi-tenancy' },
+            { label: 'Custom Prompts', slug: 'lexical-graph/prompts' },
+            { label: 'Security', slug: 'lexical-graph/security' },
+            { label: 'Hybrid Deployment', slug: 'lexical-graph/hybrid-deployment' },
+            { label: 'AWS Profile Configuration', slug: 'lexical-graph/aws-profile' },
+            { label: 'Nova 2 Model Support', slug: 'lexical-graph/nova-2-model-support' },
+            { label: 'FAQ', slug: 'lexical-graph/faq' },
+          ],
+        },
+        {
+          label: 'BYOKG-RAG',
+          items: [
+            { label: 'Overview', slug: 'byokg-rag/overview' },
+            { label: 'Indexing', slug: 'byokg-rag/indexing' },
+            { label: 'Query Engine', slug: 'byokg-rag/query-engine' },
+            { label: 'Graph Retrievers', slug: 'byokg-rag/graph-retrievers' },
+            { label: 'Multi-Strategy Retrieval', slug: 'byokg-rag/multi-strategy-retrieval' },
+            { label: 'Configuration', slug: 'byokg-rag/configuration' },
+            { label: 'FAQ', slug: 'byokg-rag/faq' },
+          ],
+        },
+      ],
+    }),
+  ],
+});
diff --git a/docs-site/package-lock.json b/docs-site/package-lock.json
new file mode 100644
index 00000000..5b8dd865
--- /dev/null
+++ b/docs-site/package-lock.json
@@ -0,0 +1,6417 @@
+{
+  "name": "graphrag-toolkit-docs",
+  "version": "0.0.1",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "graphrag-toolkit-docs",
+      "version": "0.0.1",
+      "dependencies": {
+        "@astrojs/react": "^4.2.0",
+        "@astrojs/starlight": "0.32.5",
+        "@types/react": "^18.3.28",
+        "@types/react-dom": "^18.3.7",
+        "astro": "5.6.2",
+        "react": "^18.3.1",
+        "react-dom": "^18.3.1",
+        "sharp": "^0.33.5"
+      }
+    },
+    "node_modules/@astrojs/compiler": {
+      "version": "2.13.1",
+      "resolved": "https://registry.npmjs.org/@astrojs/compiler/-/compiler-2.13.1.tgz",
+      "integrity": "sha512-f3FN83d2G/v32ipNClRKgYv30onQlMZX1vCeZMjPsMMPl1mDpmbl0+N5BYo4S/ofzqJyS5hvwacEo0CCVDn/Qg=="
+    },
+    "node_modules/@astrojs/internal-helpers": {
+      "version": "0.7.6",
+      "resolved": "https://registry.npmjs.org/@astrojs/internal-helpers/-/internal-helpers-0.7.6.tgz",
+      "integrity": "sha512-GOle7smBWKfMSP8osUIGOlB5kaHdQLV3foCsf+5Q9Wsuu+C6Fs3Ez/ttXmhjZ1HkSgsogcM1RXSjjOVieHq16Q=="
+    },
+    "node_modules/@astrojs/markdown-remark": {
+      "version": "6.3.11",
+      "resolved": "https://registry.npmjs.org/@astrojs/markdown-remark/-/markdown-remark-6.3.11.tgz",
+      "integrity": "sha512-hcaxX/5aC6lQgHeGh1i+aauvSwIT6cfyFjKWvExYSxUhZZBBdvCliOtu06gbQyhbe0pGJNoNmqNlQZ5zYUuIyQ==",
+      "dependencies": {
+        "@astrojs/internal-helpers": "0.7.6",
+        "@astrojs/prism": "3.3.0",
+
"github-slugger": "^2.0.0", + "hast-util-from-html": "^2.0.3", + "hast-util-to-text": "^4.0.2", + "import-meta-resolve": "^4.2.0", + "js-yaml": "^4.1.1", + "mdast-util-definitions": "^6.0.0", + "rehype-raw": "^7.0.0", + "rehype-stringify": "^10.0.1", + "remark-gfm": "^4.0.1", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.1.2", + "remark-smartypants": "^3.0.2", + "shiki": "^3.21.0", + "smol-toml": "^1.6.0", + "unified": "^11.0.5", + "unist-util-remove-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "unist-util-visit-parents": "^6.0.2", + "vfile": "^6.0.3" + } + }, + "node_modules/@astrojs/mdx": { + "version": "4.3.14", + "resolved": "https://registry.npmjs.org/@astrojs/mdx/-/mdx-4.3.14.tgz", + "integrity": "sha512-FBrqJQORVm+rkRa2TS5CjU9PBA6hkhrwLVBSS9A77gN2+iehvjq1w6yya/d0YKC7osiVorKkr3Qd9wNbl0ZkGA==", + "dependencies": { + "@astrojs/markdown-remark": "6.3.11", + "@mdx-js/mdx": "^3.1.1", + "acorn": "^8.15.0", + "es-module-lexer": "^1.7.0", + "estree-util-visit": "^2.0.0", + "hast-util-to-html": "^9.0.5", + "piccolore": "^0.1.3", + "rehype-raw": "^7.0.0", + "remark-gfm": "^4.0.1", + "remark-smartypants": "^3.0.2", + "source-map": "^0.7.6", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.3" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0" + }, + "peerDependencies": { + "astro": "^5.0.0" + } + }, + "node_modules/@astrojs/prism": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/@astrojs/prism/-/prism-3.3.0.tgz", + "integrity": "sha512-q8VwfU/fDZNoDOf+r7jUnMC2//H2l0TuQ6FkGJL8vD8nw/q5KiL3DS1KKBI3QhI9UQhpJ5dc7AtqfbXWuOgLCQ==", + "dependencies": { + "prismjs": "^1.30.0" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0" + } + }, + "node_modules/@astrojs/react": { + "version": "4.4.2", + "resolved": "https://registry.npmjs.org/@astrojs/react/-/react-4.4.2.tgz", + "integrity": "sha512-1tl95bpGfuaDMDn8O3x/5Dxii1HPvzjvpL2YTuqOOrQehs60I2DKiDgh1jrKc7G8lv+LQT5H15V6QONQ+9waeQ==", + "dependencies": { + "@vitejs/plugin-react": "^4.7.0", + "ultrahtml": "^1.6.0", + "vite": "^6.4.1" + }, + "engines": { + "node": "18.20.8 || ^20.3.0 || >=22.0.0" + }, + "peerDependencies": { + "@types/react": "^17.0.50 || ^18.0.21 || ^19.0.0", + "@types/react-dom": "^17.0.17 || ^18.0.6 || ^19.0.0", + "react": "^17.0.2 || ^18.0.0 || ^19.0.0", + "react-dom": "^17.0.2 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/@astrojs/sitemap": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/@astrojs/sitemap/-/sitemap-3.2.1.tgz", + "integrity": "sha512-uxMfO8f7pALq0ADL6Lk68UV6dNYjJ2xGUzyjjVj60JLBs5a6smtlkBYv3tQ0DzoqwS7c9n4FUx5lgv0yPo/fgA==", + "dependencies": { + "sitemap": "^8.0.0", + "stream-replace-string": "^2.0.0", + "zod": "^3.23.8" + } + }, + "node_modules/@astrojs/starlight": { + "version": "0.32.5", + "resolved": "https://registry.npmjs.org/@astrojs/starlight/-/starlight-0.32.5.tgz", + "integrity": "sha512-KYZsYbA5eEAsCO3XNc6DWDPml1JCD6GVusuB15fYq5dbxyB7RtC6kgwbtiEmr84maIjcQUmDtFJCz3sN4CdaSg==", + "dependencies": { + "@astrojs/mdx": "^4.0.5", + "@astrojs/sitemap": "^3.2.1", + "@pagefind/default-ui": "^1.3.0", + "@types/hast": "^3.0.4", + "@types/js-yaml": "^4.0.9", + "@types/mdast": "^4.0.4", + "astro-expressive-code": "^0.40.0", + "bcp-47": "^2.1.0", + "hast-util-from-html": "^2.0.1", + "hast-util-select": "^6.0.2", + "hast-util-to-string": "^3.0.0", + "hastscript": "^9.0.0", + "i18next": "^23.11.5", + "js-yaml": "^4.1.0", + "klona": "^2.0.6", + "mdast-util-directive": "^3.0.0", + "mdast-util-to-markdown": "^2.1.0", + "mdast-util-to-string": "^4.0.0", + 
"pagefind": "^1.3.0", + "rehype": "^13.0.1", + "rehype-format": "^5.0.0", + "remark-directive": "^3.0.0", + "unified": "^11.0.5", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.2" + }, + "peerDependencies": { + "astro": "^5.1.5" + } + }, + "node_modules/@astrojs/telemetry": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@astrojs/telemetry/-/telemetry-3.2.0.tgz", + "integrity": "sha512-wxhSKRfKugLwLlr4OFfcqovk+LIFtKwLyGPqMsv+9/ibqqnW3Gv7tBhtKEb0gAyUAC4G9BTVQeQahqnQAhd6IQ==", + "dependencies": { + "ci-info": "^4.1.0", + "debug": "^4.3.7", + "dlv": "^1.1.3", + "dset": "^3.1.4", + "is-docker": "^3.0.0", + "is-wsl": "^3.1.0", + "which-pm-runs": "^1.1.0" + }, + "engines": { + "node": "^18.17.1 || ^20.3.0 || >=22.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.29.0.tgz", + "integrity": "sha512-9NhCeYjq9+3uxgdtp20LSiJXJvN0FeCtNGpJxuMFZ1Kv3cWUNb6DOhJwUvcVCzKGR66cw4njwM6hrJLqgOwbcw==", + "dependencies": { + "@babel/helper-validator-identifier": "^7.28.5", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.29.0.tgz", + "integrity": "sha512-T1NCJqT/j9+cn8fvkt7jtwbLBfLC/1y1c7NtCeXFRgzGTsafi68MRv8yzkYSapBnFA6L3U2VSc02ciDzoAJhJg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.29.0.tgz", + "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-compilation-targets": "^7.28.6", + "@babel/helper-module-transforms": "^7.28.6", + "@babel/helpers": "^7.28.6", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/traverse": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/remapping": "^2.3.5", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/generator": { + "version": "7.29.1", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.29.1.tgz", + "integrity": "sha512-qsaF+9Qcm2Qv8SRIMMscAvG4O3lJ0F1GuMo5HR/Bp02LopNgnZBC/EkbevHFeGs4ls/oPz9v+Bsmzbkbe+0dUw==", + "dependencies": { + "@babel/parser": "^7.29.0", + "@babel/types": "^7.29.0", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.28.6.tgz", + "integrity": "sha512-JYtls3hqi15fcx5GaSNL7SCTJ2MNmjrkHXg4FSpOA/grxK8KwyZ5bubHsCq8FXCkua6xhuaaBit+3b7+VZRfcA==", + "dependencies": { + "@babel/compat-data": "^7.28.6", + "@babel/helper-validator-option": "^7.27.1", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": 
"https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.28.6.tgz", + "integrity": "sha512-l5XkZK7r7wa9LucGw9LwZyyCUscb4x37JWTPz7swwFE/0FMQAGpiWUZn8u9DzkSBWEcK25jmvubfpw2dnAMdbw==", + "dependencies": { + "@babel/traverse": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.28.6.tgz", + "integrity": "sha512-67oXFAYr2cDLDVGLXTEABjdBJZ6drElUSI7WKp70NrpyISso3plG9SAGEF6y7zbha/wOzUByWWTJvEDVNIUGcA==", + "dependencies": { + "@babel/helper-module-imports": "^7.28.6", + "@babel/helper-validator-identifier": "^7.28.5", + "@babel/traverse": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.28.6.tgz", + "integrity": "sha512-S9gzZ/bz83GRysI7gAD4wPT/AI3uCnY+9xn+Mx/KPs2JwHJIz1W8PZkg2cqyt3RNOBM8ejcXhV6y8Og7ly/Dug==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.27.1.tgz", + "integrity": "sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.29.2.tgz", + "integrity": "sha512-HoGuUs4sCZNezVEKdVcwqmZN8GoHirLUcLaYVNBK2J0DadGtdcqgr3BCbvH8+XUo4NGjNl3VOtSjEKNzqfFgKw==", + "dependencies": { + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.29.2.tgz", + "integrity": "sha512-4GgRzy/+fsBa72/RZVJmGKPmZu9Byn8o4MoLpmNe1m8ZfYnz5emHLQz3U4gLud6Zwl0RZIcgiLD7Uq7ySFuDLA==", + "dependencies": { + "@babel/types": "^7.29.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-self": { + "version": "7.27.1", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.27.1.tgz", + "integrity": "sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-react-jsx-source": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.27.1.tgz", + "integrity": "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==", + "dependencies": { + "@babel/helper-plugin-utils": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.29.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.29.2.tgz", + "integrity": "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.28.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.28.6.tgz", + "integrity": "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==", + "dependencies": { + "@babel/code-frame": "^7.28.6", + "@babel/parser": "^7.28.6", + "@babel/types": "^7.28.6" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.29.0.tgz", + "integrity": "sha512-4HPiQr0X7+waHfyXPZpWPfWL/J7dcN1mx9gL6WdQVMbPnF3+ZhSMs8tCxN7oHddJE9fhNE7+lxdnlyemKfJRuA==", + "dependencies": { + "@babel/code-frame": "^7.29.0", + "@babel/generator": "^7.29.0", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.29.0", + "@babel/template": "^7.28.6", + "@babel/types": "^7.29.0", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.29.0", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.29.0.tgz", + "integrity": "sha512-LwdZHpScM4Qz8Xw2iKSzS+cfglZzJGvofQICy7W7v4caru4EaAmyUuO6BGrbyQ2mYV11W0U8j5mBhd14dd3B0A==", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@ctrl/tinycolor": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/@ctrl/tinycolor/-/tinycolor-4.2.0.tgz", + "integrity": "sha512-kzyuwOAQnXJNLS9PSyrk0CWk35nWJW/zl/6KvnTBMFK65gm7U1/Z5BqjxeapjZCIhQcM/DsrEmcbRwDyXyXK4A==", + "engines": { + "node": ">=14" + } + }, + "node_modules/@emnapi/runtime": { + "version": "1.9.2", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.2.tgz", + "integrity": "sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==", + "optional": true, + "dependencies": { + "tslib": "^2.4.0" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.25.12.tgz", + "integrity": "sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=18" + } + }, + 
"node_modules/@esbuild/android-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.25.12.tgz", + "integrity": "sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.25.12.tgz", + "integrity": "sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.25.12.tgz", + "integrity": "sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.25.12.tgz", + "integrity": "sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.25.12.tgz", + "integrity": "sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.25.12.tgz", + "integrity": "sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.25.12.tgz", + "integrity": "sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.25.12.tgz", + "integrity": "sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.25.12.tgz", + "integrity": "sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.25.12", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.25.12.tgz", + "integrity": "sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.25.12.tgz", + "integrity": "sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.25.12.tgz", + "integrity": "sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==", + "cpu": [ + "mips64el" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.25.12.tgz", + "integrity": "sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.25.12.tgz", + "integrity": "sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.25.12.tgz", + "integrity": "sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.25.12.tgz", + "integrity": "sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-arm64/-/netbsd-arm64-0.25.12.tgz", + "integrity": "sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.25.12.tgz", + "integrity": "sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-arm64/-/openbsd-arm64-0.25.12.tgz", + "integrity": 
"sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.25.12.tgz", + "integrity": "sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/openharmony-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/openharmony-arm64/-/openharmony-arm64-0.25.12.tgz", + "integrity": "sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "openharmony" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.25.12.tgz", + "integrity": "sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.25.12.tgz", + "integrity": "sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.25.12.tgz", + "integrity": "sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.25.12.tgz", + "integrity": "sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=18" + } + }, + "node_modules/@expressive-code/core": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/@expressive-code/core/-/core-0.40.2.tgz", + "integrity": "sha512-gXY3v7jbgz6nWKvRpoDxK4AHUPkZRuJsM79vHX/5uhV9/qX6Qnctp/U/dMHog/LCVXcuOps+5nRmf1uxQVPb3w==", + "dependencies": { + "@ctrl/tinycolor": "^4.0.4", + "hast-util-select": "^6.0.2", + "hast-util-to-html": "^9.0.1", + "hast-util-to-text": "^4.0.1", + "hastscript": "^9.0.0", + "postcss": "^8.4.38", + "postcss-nested": "^6.0.1", + "unist-util-visit": "^5.0.0", + "unist-util-visit-parents": "^6.0.1" + } + }, + "node_modules/@expressive-code/plugin-frames": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/@expressive-code/plugin-frames/-/plugin-frames-0.40.2.tgz", + "integrity": "sha512-aLw5IlDlZWb10Jo/TTDCVsmJhKfZ7FJI83Zo9VDrV0OBlmHAg7klZqw68VDz7FlftIBVAmMby53/MNXPnMjTSQ==", + "dependencies": { + "@expressive-code/core": "^0.40.2" + } + }, + "node_modules/@expressive-code/plugin-shiki": { + "version": "0.40.2", + "resolved": 
"https://registry.npmjs.org/@expressive-code/plugin-shiki/-/plugin-shiki-0.40.2.tgz", + "integrity": "sha512-t2HMR5BO6GdDW1c1ISBTk66xO503e/Z8ecZdNcr6E4NpUfvY+MRje+LtrcvbBqMwWBBO8RpVKcam/Uy+1GxwKQ==", + "dependencies": { + "@expressive-code/core": "^0.40.2", + "shiki": "^1.26.1" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/core": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-1.29.2.tgz", + "integrity": "sha512-vju0lY9r27jJfOY4Z7+Rt/nIOjzJpZ3y+nYpqtUZInVoXQ/TJZcfGnNOGnKjFdVZb8qexiCuSlZRKcGfhhTTZQ==", + "dependencies": { + "@shikijs/engine-javascript": "1.29.2", + "@shikijs/engine-oniguruma": "1.29.2", + "@shikijs/types": "1.29.2", + "@shikijs/vscode-textmate": "^10.0.1", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.4" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/engine-javascript": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-1.29.2.tgz", + "integrity": "sha512-iNEZv4IrLYPv64Q6k7EPpOCE/nuvGiKl7zxdq0WFuRPF5PAE9PRo2JGq/d8crLusM59BRemJ4eOqrFrC4wiQ+A==", + "dependencies": { + "@shikijs/types": "1.29.2", + "@shikijs/vscode-textmate": "^10.0.1", + "oniguruma-to-es": "^2.2.0" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/engine-oniguruma": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-1.29.2.tgz", + "integrity": "sha512-7iiOx3SG8+g1MnlzZVDYiaeHe7Ez2Kf2HrJzdmGwkRisT7r4rak0e655AcM/tF9JG/kg5fMNYlLLKglbN7gBqA==", + "dependencies": { + "@shikijs/types": "1.29.2", + "@shikijs/vscode-textmate": "^10.0.1" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/langs": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-1.29.2.tgz", + "integrity": "sha512-FIBA7N3LZ+223U7cJDUYd5shmciFQlYkFXlkKVaHsCPgfVLiO+e12FmQE6Tf9vuyEsFe3dIl8qGWKXgEHL9wmQ==", + "dependencies": { + "@shikijs/types": "1.29.2" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/themes": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-1.29.2.tgz", + "integrity": "sha512-i9TNZlsq4uoyqSbluIcZkmPL9Bfi3djVxRnofUHwvx/h6SRW3cwgBC5SML7vsDcWyukY0eCzVN980rqP6qNl9g==", + "dependencies": { + "@shikijs/types": "1.29.2" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/@shikijs/types": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-1.29.2.tgz", + "integrity": "sha512-VJjK0eIijTZf0QSTODEXCqinjBn0joAHQ+aPSBzrv4O2d/QSbsMw+ZeSRx03kV34Hy7NzUvV/7NqfYGRLrASmw==", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.1", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/oniguruma-to-es": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-2.3.0.tgz", + "integrity": "sha512-bwALDxriqfKGfUufKGGepCzu9x7nJQuoRoAFp4AnwehhC2crqrDIAP/uN2qdlsAvSMpeRC3+Yzhqc7hLmle5+g==", + "dependencies": { + "emoji-regex-xs": "^1.0.0", + "regex": "^5.1.1", + "regex-recursion": "^5.1.1" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/regex": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/regex/-/regex-5.1.1.tgz", + "integrity": "sha512-dN5I359AVGPnwzJm2jN1k0W9LPZ+ePvoOeVMMfqIMFz53sSwXkxaJoxr50ptnsC771lK95BnTrVSZxq0b9yCGw==", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + 
"node_modules/@expressive-code/plugin-shiki/node_modules/regex-recursion": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-5.1.1.tgz", + "integrity": "sha512-ae7SBCbzVNrIjgSbh7wMznPcQel1DNlDtzensnFxpiNpXt1U2ju/bHugH422r+4LAVS1FpW1YCwilmnNsjum9w==", + "dependencies": { + "regex": "^5.1.1", + "regex-utilities": "^2.3.0" + } + }, + "node_modules/@expressive-code/plugin-shiki/node_modules/shiki": { + "version": "1.29.2", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-1.29.2.tgz", + "integrity": "sha512-njXuliz/cP+67jU2hukkxCNuH1yUi4QfdZZY+sMr5PPrIyXSu5iTb/qYC4BiWWB0vZ+7TbdvYUCeL23zpwCfbg==", + "dependencies": { + "@shikijs/core": "1.29.2", + "@shikijs/engine-javascript": "1.29.2", + "@shikijs/engine-oniguruma": "1.29.2", + "@shikijs/langs": "1.29.2", + "@shikijs/themes": "1.29.2", + "@shikijs/types": "1.29.2", + "@shikijs/vscode-textmate": "^10.0.1", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@expressive-code/plugin-text-markers": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/@expressive-code/plugin-text-markers/-/plugin-text-markers-0.40.2.tgz", + "integrity": "sha512-/XoLjD67K9nfM4TgDlXAExzMJp6ewFKxNpfUw4F7q5Ecy+IU3/9zQQG/O70Zy+RxYTwKGw2MA9kd7yelsxnSmw==", + "dependencies": { + "@expressive-code/core": "^0.40.2" + } + }, + "node_modules/@img/sharp-darwin-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-arm64/-/sharp-darwin-arm64-0.33.5.tgz", + "integrity": "sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-darwin-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-darwin-x64/-/sharp-darwin-x64-0.33.5.tgz", + "integrity": "sha512-fyHac4jIc1ANYGRDxtiqelIbdWkIuQaI84Mv45KvGRRxSAa7o7d1ZKAOBaYbnepLC1WqxfpimdeWfvqqSGwR2Q==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-darwin-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-libvips-darwin-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-arm64/-/sharp-libvips-darwin-arm64-1.0.4.tgz", + "integrity": "sha512-XblONe153h0O2zuFfTAbQYAX2JhYmDHeWikp1LM9Hul9gVPjFY427k6dFEcOL72O01QxQsWi761svJ/ev9xEDg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-darwin-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-darwin-x64/-/sharp-libvips-darwin-x64-1.0.4.tgz", + "integrity": "sha512-xnGR8YuZYfJGmWPvmlunFaWJsb9T/AO2ykoP3Fz/0X5XV2aoYBPkX6xqCQvUTKKiLddarLaxpzNe+b1hjeWHAQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm/-/sharp-libvips-linux-arm-1.0.5.tgz", + "integrity": 
"sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-arm64/-/sharp-libvips-linux-arm64-1.0.4.tgz", + "integrity": "sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-s390x": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-s390x/-/sharp-libvips-linux-s390x-1.0.4.tgz", + "integrity": "sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linux-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linux-x64/-/sharp-libvips-linux-x64-1.0.4.tgz", + "integrity": "sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-arm64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-arm64/-/sharp-libvips-linuxmusl-arm64-1.0.4.tgz", + "integrity": "sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-libvips-linuxmusl-x64": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@img/sharp-libvips-linuxmusl-x64/-/sharp-libvips-linuxmusl-x64-1.0.4.tgz", + "integrity": "sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-linux-arm": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm/-/sharp-linux-arm-0.33.5.tgz", + "integrity": "sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm": "1.0.5" + } + }, + "node_modules/@img/sharp-linux-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-arm64/-/sharp-linux-arm64-0.33.5.tgz", + "integrity": "sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-arm64": "1.0.4" + 
} + }, + "node_modules/@img/sharp-linux-s390x": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-s390x/-/sharp-linux-s390x-0.33.5.tgz", + "integrity": "sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-s390x": "1.0.4" + } + }, + "node_modules/@img/sharp-linux-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linux-x64/-/sharp-linux-x64-0.33.5.tgz", + "integrity": "sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linux-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-arm64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-arm64/-/sharp-linuxmusl-arm64-0.33.5.tgz", + "integrity": "sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4" + } + }, + "node_modules/@img/sharp-linuxmusl-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-linuxmusl-x64/-/sharp-linuxmusl-x64-0.33.5.tgz", + "integrity": "sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-libvips-linuxmusl-x64": "1.0.4" + } + }, + "node_modules/@img/sharp-wasm32": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-wasm32/-/sharp-wasm32-0.33.5.tgz", + "integrity": "sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==", + "cpu": [ + "wasm32" + ], + "optional": true, + "dependencies": { + "@emnapi/runtime": "^1.2.0" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-ia32": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-ia32/-/sharp-win32-ia32-0.33.5.tgz", + "integrity": "sha512-T36PblLaTwuVJ/zw/LaH0PdZkRz5rd3SmMHX8GSmR7vtNSP5Z6bQkExdSK7xGWyxLw4sUknBuugTelgw2faBbQ==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@img/sharp-win32-x64": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/@img/sharp-win32-x64/-/sharp-win32-x64-0.33.5.tgz", + "integrity": "sha512-MpY/o8/8kj+EcnxwvrP4aTJSWw/aZ7JIGR4aBeZkZw5B7/Jn+tY9/VNwtcoGmdT7GfggGIU4kygOMSbYnOrAbg==", + "cpu": [ + "x64" + ], + "optional": true, + 
"os": [ + "win32" + ], + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/remapping": { + "version": "2.3.5", + "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz", + "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@mdx-js/mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-3.1.1.tgz", + "integrity": "sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdx": "^2.0.0", + "acorn": "^8.0.0", + "collapse-white-space": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-util-scope": "^1.0.0", + "estree-walker": "^3.0.0", + "hast-util-to-jsx-runtime": "^2.0.0", + "markdown-extensions": "^2.0.0", + "recma-build-jsx": "^1.0.0", + "recma-jsx": "^1.0.0", + "recma-stringify": "^1.0.0", + "rehype-recma": "^1.0.0", + "remark-mdx": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.0.0", + "source-map": "^0.7.0", + "unified": "^11.0.0", + "unist-util-position-from-estree": "^2.0.0", + "unist-util-stringify-position": "^4.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/@oslojs/encoding": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@oslojs/encoding/-/encoding-1.1.0.tgz", + "integrity": "sha512-70wQhgYmndg4GCPxPPxPGevRKqTIJ2Nh4OkiMWmDAVYsTQ+Ta7Sq+rPevXyXGdzr30/qZBnyOalCszoMxlyldQ==" + }, + "node_modules/@pagefind/darwin-arm64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/darwin-arm64/-/darwin-arm64-1.5.0.tgz", + "integrity": 
"sha512-OXQVlxALU9+Lz/LxkAa+RvaxY1cnRKUDCuwl9o8PY5Lg/znP573y4WIbVOOIz8Bv7uj7r00TGy7pD+NSLMJGBw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@pagefind/darwin-x64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/darwin-x64/-/darwin-x64-1.5.0.tgz", + "integrity": "sha512-+LK1Xq5n/B0hHc08DW61SnfIlfLKyXZ1oKcbfZ1MimE7Rn0Q6R0VI/TlC04f/JDPm+67zAOwPGizzvefOi5vqQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@pagefind/default-ui": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/default-ui/-/default-ui-1.5.0.tgz", + "integrity": "sha512-C8VZ5pDz1Kc89GicXsWZiIlAwTVwUtFDOzh0RzJt5FmbkJzsmPVICPkLUfOsWgBCyFAH/vYR+lUTaGPDxZ4IXw==" + }, + "node_modules/@pagefind/freebsd-x64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/freebsd-x64/-/freebsd-x64-1.5.0.tgz", + "integrity": "sha512-kicDfUF9gn/z06NimTwNlZXF8z3pLsN3BIPPt6N8unuh0n55fr64tVs2p3a5RKYmQkJGjPfOE/C9GI5YTEpURg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@pagefind/linux-arm64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/linux-arm64/-/linux-arm64-1.5.0.tgz", + "integrity": "sha512-e5rDB3wPm89bcSLiatKBDTrVTbsMQrrtkXRaAoUJYU0C1suXVvEzZfjmMvrUDvYhZBx/Ls8hGuGxlqSJBz3gDg==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@pagefind/linux-x64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/linux-x64/-/linux-x64-1.5.0.tgz", + "integrity": "sha512-vh52DcBiF/mRMmq+Rwt3M3RgEWgl00jFk/M5NWhLEHJFq4+papQXwbyKbi7cNlxaeYrKx6wOfW3fm9cftfc/Kg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@pagefind/windows-arm64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/windows-arm64/-/windows-arm64-1.5.0.tgz", + "integrity": "sha512-kg+szZwffZdyWn6SL6RHjAYjhSvJ2bT4qkv3KepGsbmD9fuSHUSC+2kydDneDVUA9qEDRf9uSFoEAsXsp1/JKA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@pagefind/windows-x64": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@pagefind/windows-x64/-/windows-x64-1.5.0.tgz", + "integrity": "sha512-8eOCmB8lnpyvwz+HrcTXLuBxhj7UseAFh6KGEXRe8UCcAfVQih+qPy/4akJRezViI+ONijz9oi7HpMkw9rdtBg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rolldown/pluginutils": { + "version": "1.0.0-beta.27", + "resolved": "https://registry.npmjs.org/@rolldown/pluginutils/-/pluginutils-1.0.0-beta.27.tgz", + "integrity": "sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==" + }, + "node_modules/@rollup/pluginutils": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/@rollup/pluginutils/-/pluginutils-5.3.0.tgz", + "integrity": "sha512-5EdhGZtnu3V88ces7s53hhfK5KSASnJZv8Lulpc04cWO3REESroJXg73DFsOmgbU2BhwV0E20bu2IDZb3VKW4Q==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-walker": "^2.0.2", + "picomatch": "^4.0.2" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "rollup": "^1.20.0||^2.0.0||^3.0.0||^4.0.0" + }, + "peerDependenciesMeta": { + "rollup": { + "optional": true + } + } + }, + "node_modules/@rollup/pluginutils/node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": 
"sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==" + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.1.tgz", + "integrity": "sha512-d6FinEBLdIiK+1uACUttJKfgZREXrF0Qc2SmLII7W2AD8FfiZ9Wjd+rD/iRuf5s5dWrr1GgwXCvPqOuDquOowA==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.1.tgz", + "integrity": "sha512-YjG/EwIDvvYI1YvYbHvDz/BYHtkY4ygUIXHnTdLhG+hKIQFBiosfWiACWortsKPKU/+dUwQQCKQM3qrDe8c9BA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.1.tgz", + "integrity": "sha512-mjCpF7GmkRtSJwon+Rq1N8+pI+8l7w5g9Z3vWj4T7abguC4Czwi3Yu/pFaLvA3TTeMVjnu3ctigusqWUfjZzvw==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.1.tgz", + "integrity": "sha512-haZ7hJ1JT4e9hqkoT9R/19XW2QKqjfJVv+i5AGg57S+nLk9lQnJ1F/eZloRO3o9Scy9CM3wQ9l+dkXtcBgN5Ew==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.1.tgz", + "integrity": "sha512-czw90wpQq3ZsAVBlinZjAYTKduOjTywlG7fEeWKUA7oCmpA8xdTkxZZlwNJKWqILlq0wehoZcJYfBvOyhPTQ6w==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.1.tgz", + "integrity": "sha512-KVB2rqsxTHuBtfOeySEyzEOB7ltlB/ux38iu2rBQzkjbwRVlkhAGIEDiiYnO2kFOkJp+Z7pUXKyrRRFuFUKt+g==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.1.tgz", + "integrity": "sha512-L+34Qqil+v5uC0zEubW7uByo78WOCIrBvci69E7sFASRl0X7b/MB6Cqd1lky/CtcSVTydWa2WZwFuWexjS5o6g==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.1.tgz", + "integrity": "sha512-n83O8rt4v34hgFzlkb1ycniJh7IR5RCIqt6mz1VRJD6pmhRi0CXdmfnLu9dIUS6buzh60IvACM842Ffb3xd6Gg==", + "cpu": [ + "arm" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.1.tgz", + "integrity": "sha512-Nql7sTeAzhTAja3QXeAI48+/+GjBJ+QmAH13snn0AJSNL50JsDqotyudHyMbO2RbJkskbMbFJfIJKWA6R1LCJQ==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.60.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.1.tgz", + "integrity": "sha512-+pUymDhd0ys9GcKZPPWlFiZ67sTWV5UU6zOJat02M1+PiuSGDziyRuI/pPue3hoUwm2uGfxdL+trT6Z9rxnlMA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.1.tgz", + "integrity": "sha512-VSvgvQeIcsEvY4bKDHEDWcpW4Yw7BtlKG1GUT4FzBUlEKQK0rWHYBqQt6Fm2taXS+1bXvJT6kICu5ZwqKCnvlQ==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.1.tgz", + "integrity": "sha512-4LqhUomJqwe641gsPp6xLfhqWMbQV04KtPp7/dIp0nzPxAkNY1AbwL5W0MQpcalLYk07vaW9Kp1PBhdpZYYcEw==", + "cpu": [ + "loong64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.1.tgz", + "integrity": "sha512-tLQQ9aPvkBxOc/EUT6j3pyeMD6Hb8QF2BTBnCQWP/uu1lhc9AIrIjKnLYMEroIz/JvtGYgI9dF3AxHZNaEH0rw==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.1.tgz", + "integrity": "sha512-RMxFhJwc9fSXP6PqmAz4cbv3kAyvD1etJFjTx4ONqFP9DkTkXsAMU4v3Vyc5BgzC+anz7nS/9tp4obsKfqkDHg==", + "cpu": [ + "ppc64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.1.tgz", + "integrity": "sha512-QKgFl+Yc1eEk6MmOBfRHYF6lTxiiiV3/z/BRrbSiW2I7AFTXoBFvdMEyglohPj//2mZS4hDOqeB0H1ACh3sBbg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.1.tgz", + "integrity": "sha512-RAjXjP/8c6ZtzatZcA1RaQr6O1TRhzC+adn8YZDnChliZHviqIjmvFwHcxi4JKPSDAt6Uhf/7vqcBzQJy0PDJg==", + "cpu": [ + "riscv64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.1.tgz", + "integrity": "sha512-wcuocpaOlaL1COBYiA89O6yfjlp3RwKDeTIA0hM7OpmhR1Bjo9j31G1uQVpDlTvwxGn2nQs65fBFL5UFd76FcQ==", + "cpu": [ + "s390x" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.1.tgz", + "integrity": "sha512-77PpsFQUCOiZR9+LQEFg9GClyfkNXj1MP6wRnzYs0EeWbPcHs02AXu4xuUbM1zhwn3wqaizle3AEYg5aeoohhg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.1.tgz", + "integrity": 
"sha512-5cIATbk5vynAjqqmyBjlciMJl1+R/CwX9oLk/EyiFXDWd95KpHdrOJT//rnUl4cUcskrd0jCCw3wpZnhIHdD9w==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.1.tgz", + "integrity": "sha512-cl0w09WsCi17mcmWqqglez9Gk8isgeWvoUZ3WiJFYSR3zjBQc2J5/ihSjpl+VLjPqjQ/1hJRcqBfLjssREQILw==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.1.tgz", + "integrity": "sha512-4Cv23ZrONRbNtbZa37mLSueXUCtN7MXccChtKpUnQNgF010rjrjfHx3QxkS2PI7LqGT5xXyYs1a7LbzAwT0iCA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.1.tgz", + "integrity": "sha512-i1okWYkA4FJICtr7KpYzFpRTHgy5jdDbZiWfvny21iIKky5YExiDXP+zbXzm3dUcFpkEeYNHgQ5fuG236JPq0g==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.1.tgz", + "integrity": "sha512-u09m3CuwLzShA0EYKMNiFgcjjzwqtUMLmuCJLeZWjjOYA3IT2Di09KaxGBTP9xVztWyIWjVdsB2E9goMjZvTQg==", + "cpu": [ + "ia32" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.1.tgz", + "integrity": "sha512-k+600V9Zl1CM7eZxJgMyTUzmrmhB/0XZnF4pRypKAlAgxmedUA+1v9R+XOFv56W4SlHEzfeMtzujLJD22Uz5zg==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.1.tgz", + "integrity": "sha512-lWMnixq/QzxyhTV6NjQJ4SFo1J6PvOX8vUx5Wb4bBPsEb+8xZ89Bz6kOXpfXj9ak9AHTQVQzlgzBEc1SyM27xQ==", + "cpu": [ + "x64" + ], + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@shikijs/core": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/core/-/core-3.23.0.tgz", + "integrity": "sha512-NSWQz0riNb67xthdm5br6lAkvpDJRTgB36fxlo37ZzM2yq0PQFFzbd8psqC2XMPgCzo1fW6cVi18+ArJ44wqgA==", + "dependencies": { + "@shikijs/types": "3.23.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4", + "hast-util-to-html": "^9.0.5" + } + }, + "node_modules/@shikijs/engine-javascript": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-javascript/-/engine-javascript-3.23.0.tgz", + "integrity": "sha512-aHt9eiGFobmWR5uqJUViySI1bHMqrAgamWE1TYSUoftkAeCCAiGawPMwM+VCadylQtF4V3VNOZ5LmfItH5f3yA==", + "dependencies": { + "@shikijs/types": "3.23.0", + "@shikijs/vscode-textmate": "^10.0.2", + "oniguruma-to-es": "^4.3.4" + } + }, + "node_modules/@shikijs/engine-oniguruma": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/engine-oniguruma/-/engine-oniguruma-3.23.0.tgz", + "integrity": "sha512-1nWINwKXxKKLqPibT5f4pAFLej9oZzQTsby8942OTlsJzOBZ0MWKiwzMsd+jhzu8YPCHAswGnnN1YtQfirL35g==", + "dependencies": { + "@shikijs/types": 
"3.23.0", + "@shikijs/vscode-textmate": "^10.0.2" + } + }, + "node_modules/@shikijs/langs": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/langs/-/langs-3.23.0.tgz", + "integrity": "sha512-2Ep4W3Re5aB1/62RSYQInK9mM3HsLeB91cHqznAJMuylqjzNVAVCMnNWRHFtcNHXsoNRayP9z1qj4Sq3nMqYXg==", + "dependencies": { + "@shikijs/types": "3.23.0" + } + }, + "node_modules/@shikijs/themes": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/themes/-/themes-3.23.0.tgz", + "integrity": "sha512-5qySYa1ZgAT18HR/ypENL9cUSGOeI2x+4IvYJu4JgVJdizn6kG4ia5Q1jDEOi7gTbN4RbuYtmHh0W3eccOrjMA==", + "dependencies": { + "@shikijs/types": "3.23.0" + } + }, + "node_modules/@shikijs/types": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/@shikijs/types/-/types-3.23.0.tgz", + "integrity": "sha512-3JZ5HXOZfYjsYSk0yPwBrkupyYSLpAE26Qc0HLghhZNGTZg/SKxXIIgoxOpmmeQP0RRSDJTk1/vPfw9tbw+jSQ==", + "dependencies": { + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/@shikijs/vscode-textmate": { + "version": "10.0.2", + "resolved": "https://registry.npmjs.org/@shikijs/vscode-textmate/-/vscode-textmate-10.0.2.tgz", + "integrity": "sha512-83yeghZ2xxin3Nj8z1NMd/NCuca+gsYXswywDy5bHvwlWL8tpTQmzGeUuHd9FC3E/SBEMvzJRwWEOz5gGes9Qg==" + }, + "node_modules/@types/babel__core": { + "version": "7.20.5", + "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz", + "integrity": "sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==", + "dependencies": { + "@babel/parser": "^7.20.7", + "@babel/types": "^7.20.7", + "@types/babel__generator": "*", + "@types/babel__template": "*", + "@types/babel__traverse": "*" + } + }, + "node_modules/@types/babel__generator": { + "version": "7.27.0", + "resolved": "https://registry.npmjs.org/@types/babel__generator/-/babel__generator-7.27.0.tgz", + "integrity": "sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==", + "dependencies": { + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__template": { + "version": "7.4.4", + "resolved": "https://registry.npmjs.org/@types/babel__template/-/babel__template-7.4.4.tgz", + "integrity": "sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==", + "dependencies": { + "@babel/parser": "^7.1.0", + "@babel/types": "^7.0.0" + } + }, + "node_modules/@types/babel__traverse": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@types/babel__traverse/-/babel__traverse-7.28.0.tgz", + "integrity": "sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==", + "dependencies": { + "@babel/types": "^7.28.2" + } + }, + "node_modules/@types/debug": { + "version": "4.1.13", + "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.13.tgz", + "integrity": "sha512-KSVgmQmzMwPlmtljOomayoR89W4FynCAi3E8PPs7vmDVPe84hT+vGPKkJfThkmXs0x0jAaa9U8uW8bbfyS2fWw==", + "dependencies": { + "@types/ms": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==" + }, + "node_modules/@types/estree-jsx": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree-jsx/-/estree-jsx-1.0.5.tgz", + "integrity": 
"sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/hast": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-3.0.4.tgz", + "integrity": "sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/js-yaml": { + "version": "4.0.9", + "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-4.0.9.tgz", + "integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg==" + }, + "node_modules/@types/mdast": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-4.0.4.tgz", + "integrity": "sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==" + }, + "node_modules/@types/ms": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@types/ms/-/ms-2.1.0.tgz", + "integrity": "sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==" + }, + "node_modules/@types/nlcst": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@types/nlcst/-/nlcst-2.0.3.tgz", + "integrity": "sha512-vSYNSDe6Ix3q+6Z7ri9lyWqgGhJTmzRjZRqyq15N0Z/1/UnVsno9G/N40NBijoYx2seFDIl0+B2mgAb9mezUCA==", + "dependencies": { + "@types/unist": "*" + } + }, + "node_modules/@types/node": { + "version": "25.5.2", + "resolved": "https://registry.npmjs.org/@types/node/-/node-25.5.2.tgz", + "integrity": "sha512-tO4ZIRKNC+MDWV4qKVZe3Ql/woTnmHDr5JD8UI5hn2pwBrHEwOEMZK7WlNb5RKB6EoJ02gwmQS9OrjuFnZYdpg==", + "dependencies": { + "undici-types": "~7.18.0" + } + }, + "node_modules/@types/prop-types": { + "version": "15.7.15", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.15.tgz", + "integrity": "sha512-F6bEyamV9jKGAFBEmlQnesRPGOQqS2+Uwi0Em15xenOxHaf2hv6L8YCVn3rPdPJOiJfPiCnLIRyvwVaqMY3MIw==" + }, + "node_modules/@types/react": { + "version": "18.3.28", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.3.28.tgz", + "integrity": "sha512-z9VXpC7MWrhfWipitjNdgCauoMLRdIILQsAEV+ZesIzBq/oUlxk0m3ApZuMFCXdnS4U7KrI+l3WRUEGQ8K1QKw==", + "dependencies": { + "@types/prop-types": "*", + "csstype": "^3.2.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.3.7", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.3.7.tgz", + "integrity": "sha512-MEe3UeoENYVFXzoXEWsvcpg6ZvlrFNlOQ7EOsvhI3CfAXwzPfO8Qwuxd40nepsYKqyyVQnTdEfv68q91yLcKrQ==", + "peerDependencies": { + "@types/react": "^18.0.0" + } + }, + "node_modules/@types/sax": { + "version": "1.2.7", + "resolved": "https://registry.npmjs.org/@types/sax/-/sax-1.2.7.tgz", + "integrity": "sha512-rO73L89PJxeYM3s3pPPjiPgVVcymqU490g0YO5n5By0k2Erzj6tay/4lr1CHAAU4JyOWd1rpQ8bCf6cZfHU96A==", + "dependencies": { + "@types/node": "*" + } + }, + "node_modules/@types/unist": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-3.0.3.tgz", + "integrity": "sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==" + }, + 
"node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==" + }, + "node_modules/@vitejs/plugin-react": { + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react/-/plugin-react-4.7.0.tgz", + "integrity": "sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==", + "dependencies": { + "@babel/core": "^7.28.0", + "@babel/plugin-transform-react-jsx-self": "^7.27.1", + "@babel/plugin-transform-react-jsx-source": "^7.27.1", + "@rolldown/pluginutils": "1.0.0-beta.27", + "@types/babel__core": "^7.20.5", + "react-refresh": "^0.17.0" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0" + } + }, + "node_modules/acorn": { + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ansi-align": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz", + "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==", + "dependencies": { + "string-width": "^4.1.0" + } + }, + "node_modules/ansi-align/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/ansi-align/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-align/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "engines": { + "node": ">=12" + }, + "funding": 
{ + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/anymatch/node_modules/picomatch": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.2.tgz", + "integrity": "sha512-V7+vQEJ06Z+c5tSye8S+nHUfI51xoXIXjHQ99cQtKUkQqqO1kO/KCJUfZXuB47h/YBlDhah2H3hdUGXn8ie0oA==", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==" + }, + "node_modules/aria-query": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/aria-query/-/aria-query-5.3.2.tgz", + "integrity": "sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/array-iterate": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/array-iterate/-/array-iterate-2.0.1.tgz", + "integrity": "sha512-I1jXZMjAgCMmxT4qxXfPXa6SthSoE8h6gkSI9BGGNv8mP8G/v0blc+qFnZu6K42vTOiuME596QaLO0TP3Lk0xg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/astring": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/astring/-/astring-1.9.0.tgz", + "integrity": "sha512-LElXdjswlqjWrPpJFg1Fx4wpkOCxj1TDHlSV4PlaRxHGWko024xICaa97ZkMfs6DRKlCguiAI+rbXv5GWwXIkg==", + "bin": { + "astring": "bin/astring" + } + }, + "node_modules/astro": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/astro/-/astro-5.6.2.tgz", + "integrity": "sha512-Ds4x3Au2s+YfBKLXY2HCHird+73ff4wTds+cuAGqaHmY4GR+Gc+sCAP54Mq6blkpuFqXwqjPUAcmxvAwovJPGQ==", + "dependencies": { + "@astrojs/compiler": "^2.11.0", + "@astrojs/internal-helpers": "0.6.1", + "@astrojs/markdown-remark": "6.3.1", + "@astrojs/telemetry": "3.2.0", + "@oslojs/encoding": "^1.1.0", + "@rollup/pluginutils": "^5.1.4", + "acorn": "^8.14.1", + "aria-query": "^5.3.2", + "axobject-query": "^4.1.0", + "boxen": "8.0.1", + "ci-info": "^4.2.0", + "clsx": "^2.1.1", + "common-ancestor-path": "^1.0.1", + "cookie": "^1.0.2", + "cssesc": "^3.0.0", + "debug": "^4.4.0", + "deterministic-object-hash": "^2.0.2", + "devalue": "^5.1.1", + "diff": "^5.2.0", + "dlv": "^1.1.3", + "dset": "^3.1.4", + "es-module-lexer": "^1.6.0", + "esbuild": "^0.25.0", + "estree-walker": "^3.0.3", + "flattie": "^1.1.1", + "github-slugger": "^2.0.0", + "html-escaper": "3.0.3", + 
"http-cache-semantics": "^4.1.1", + "js-yaml": "^4.1.0", + "kleur": "^4.1.5", + "magic-string": "^0.30.17", + "magicast": "^0.3.5", + "mrmime": "^2.0.1", + "neotraverse": "^0.6.18", + "p-limit": "^6.2.0", + "p-queue": "^8.1.0", + "package-manager-detector": "^1.1.0", + "picomatch": "^4.0.2", + "prompts": "^2.4.2", + "rehype": "^13.0.2", + "semver": "^7.7.1", + "shiki": "^3.2.1", + "tinyexec": "^0.3.2", + "tinyglobby": "^0.2.12", + "tsconfck": "^3.1.5", + "ultrahtml": "^1.6.0", + "unist-util-visit": "^5.0.0", + "unstorage": "^1.15.0", + "vfile": "^6.0.3", + "vite": "^6.2.6", + "vitefu": "^1.0.6", + "xxhash-wasm": "^1.1.0", + "yargs-parser": "^21.1.1", + "yocto-spinner": "^0.2.1", + "zod": "^3.24.2", + "zod-to-json-schema": "^3.24.5", + "zod-to-ts": "^1.2.0" + }, + "bin": { + "astro": "astro.js" + }, + "engines": { + "node": "^18.17.1 || ^20.3.0 || >=22.0.0", + "npm": ">=9.6.5", + "pnpm": ">=7.1.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/astrodotbuild" + }, + "optionalDependencies": { + "sharp": "^0.33.3" + } + }, + "node_modules/astro-expressive-code": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/astro-expressive-code/-/astro-expressive-code-0.40.2.tgz", + "integrity": "sha512-yJMQId0yXSAbW9I6yqvJ3FcjKzJ8zRL7elbJbllkv1ZJPlsI0NI83Pxn1YL1IapEM347EvOOkSW2GL+2+NO61w==", + "dependencies": { + "rehype-expressive-code": "^0.40.2" + }, + "peerDependencies": { + "astro": "^4.0.0-beta || ^5.0.0-beta || ^3.3.0" + } + }, + "node_modules/astro/node_modules/@astrojs/internal-helpers": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/@astrojs/internal-helpers/-/internal-helpers-0.6.1.tgz", + "integrity": "sha512-l5Pqf6uZu31aG+3Lv8nl/3s4DbUzdlxTWDof4pEpto6GUJNhhCbelVi9dEyurOVyqaelwmS9oSyOWOENSfgo9A==" + }, + "node_modules/astro/node_modules/@astrojs/markdown-remark": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@astrojs/markdown-remark/-/markdown-remark-6.3.1.tgz", + "integrity": "sha512-c5F5gGrkczUaTVgmMW9g1YMJGzOtRvjjhw6IfGuxarM6ct09MpwysP10US729dy07gg8y+ofVifezvP3BNsWZg==", + "dependencies": { + "@astrojs/internal-helpers": "0.6.1", + "@astrojs/prism": "3.2.0", + "github-slugger": "^2.0.0", + "hast-util-from-html": "^2.0.3", + "hast-util-to-text": "^4.0.2", + "import-meta-resolve": "^4.1.0", + "js-yaml": "^4.1.0", + "mdast-util-definitions": "^6.0.0", + "rehype-raw": "^7.0.0", + "rehype-stringify": "^10.0.1", + "remark-gfm": "^4.0.1", + "remark-parse": "^11.0.0", + "remark-rehype": "^11.1.1", + "remark-smartypants": "^3.0.2", + "shiki": "^3.0.0", + "smol-toml": "^1.3.1", + "unified": "^11.0.5", + "unist-util-remove-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "unist-util-visit-parents": "^6.0.1", + "vfile": "^6.0.3" + } + }, + "node_modules/astro/node_modules/@astrojs/prism": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@astrojs/prism/-/prism-3.2.0.tgz", + "integrity": "sha512-GilTHKGCW6HMq7y3BUv9Ac7GMe/MO9gi9GW62GzKtth0SwukCu/qp2wLiGpEujhY+VVhaG9v7kv/5vFzvf4NYw==", + "dependencies": { + "prismjs": "^1.29.0" + }, + "engines": { + "node": "^18.17.1 || ^20.3.0 || >=22.0.0" + } + }, + "node_modules/astro/node_modules/lru-cache": { + "version": "11.3.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.3.3.tgz", + "integrity": "sha512-JvNw9Y81y33E+BEYPr0U7omo+U9AySnsMsEiXgwT6yqd31VQWTLNQqmT4ou5eqPFUrTfIDFta2wKhB1hyohtAQ==", + "engines": { + "node": "20 || >=22" + } + }, + "node_modules/astro/node_modules/semver": { + "version": "7.7.4", + "resolved": 
"https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/astro/node_modules/unstorage": { + "version": "1.17.5", + "resolved": "https://registry.npmjs.org/unstorage/-/unstorage-1.17.5.tgz", + "integrity": "sha512-0i3iqvRfx29hkNntHyQvJTpf5W9dQ9ZadSoRU8+xVlhVtT7jAX57fazYO9EHvcRCfBCyi5YRya7XCDOsbTgkPg==", + "dependencies": { + "anymatch": "^3.1.3", + "chokidar": "^5.0.0", + "destr": "^2.0.5", + "h3": "^1.15.10", + "lru-cache": "^11.2.7", + "node-fetch-native": "^1.6.7", + "ofetch": "^1.5.1", + "ufo": "^1.6.3" + }, + "peerDependencies": { + "@azure/app-configuration": "^1.8.0", + "@azure/cosmos": "^4.2.0", + "@azure/data-tables": "^13.3.0", + "@azure/identity": "^4.6.0", + "@azure/keyvault-secrets": "^4.9.0", + "@azure/storage-blob": "^12.26.0", + "@capacitor/preferences": "^6 || ^7 || ^8", + "@deno/kv": ">=0.9.0", + "@netlify/blobs": "^6.5.0 || ^7.0.0 || ^8.1.0 || ^9.0.0 || ^10.0.0", + "@planetscale/database": "^1.19.0", + "@upstash/redis": "^1.34.3", + "@vercel/blob": ">=0.27.1", + "@vercel/functions": "^2.2.12 || ^3.0.0", + "@vercel/kv": "^1 || ^2 || ^3", + "aws4fetch": "^1.0.20", + "db0": ">=0.2.1", + "idb-keyval": "^6.2.1", + "ioredis": "^5.4.2", + "uploadthing": "^7.4.4" + }, + "peerDependenciesMeta": { + "@azure/app-configuration": { + "optional": true + }, + "@azure/cosmos": { + "optional": true + }, + "@azure/data-tables": { + "optional": true + }, + "@azure/identity": { + "optional": true + }, + "@azure/keyvault-secrets": { + "optional": true + }, + "@azure/storage-blob": { + "optional": true + }, + "@capacitor/preferences": { + "optional": true + }, + "@deno/kv": { + "optional": true + }, + "@netlify/blobs": { + "optional": true + }, + "@planetscale/database": { + "optional": true + }, + "@upstash/redis": { + "optional": true + }, + "@vercel/blob": { + "optional": true + }, + "@vercel/functions": { + "optional": true + }, + "@vercel/kv": { + "optional": true + }, + "aws4fetch": { + "optional": true + }, + "db0": { + "optional": true + }, + "idb-keyval": { + "optional": true + }, + "ioredis": { + "optional": true + }, + "uploadthing": { + "optional": true + } + } + }, + "node_modules/axobject-query": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-4.1.0.tgz", + "integrity": "sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/bail": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz", + "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/base-64": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/base-64/-/base-64-1.0.0.tgz", + "integrity": "sha512-kwDPIFCGx0NZHog36dj+tHiwP4QMzsZ3AgMViUBKI0+V5n4U0ufTCUMhnQ04diaRI8EX/QcPfql7zlhZ7j4zgg==" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.10.16", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.16.tgz", + "integrity": "sha512-Lyf3aK28zpsD1yQMiiHD4RvVb6UdMoo8xzG2XzFIfR9luPzOpcBlAsT/qfB1XWS1bxWT+UtE4WmQgsp297FYOA==", + "bin": { + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + 
"node": ">=6.0.0" + } + }, + "node_modules/bcp-47": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/bcp-47/-/bcp-47-2.1.0.tgz", + "integrity": "sha512-9IIS3UPrvIa1Ej+lVDdDwO7zLehjqsaByECw0bu2RRGP73jALm6FYbzI5gWbgHLvNdkvfXB5YrSbocZdOS0c0w==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/bcp-47-match": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/bcp-47-match/-/bcp-47-match-2.0.3.tgz", + "integrity": "sha512-JtTezzbAibu8G0R9op9zb3vcWZd9JF6M0xOYGPn0fNCd7wOpRB1mU2mH9T8gaBGbAAyIIVgB2G7xG0GP98zMAQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==" + }, + "node_modules/boxen": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/boxen/-/boxen-8.0.1.tgz", + "integrity": "sha512-F3PH5k5juxom4xktynS7MoFY+NUWH5LC4CnH11YB8NPew+HLpmBLCybSAEyb2F+4pRXhuhWqFesoQd6DAyc2hw==", + "dependencies": { + "ansi-align": "^3.0.1", + "camelcase": "^8.0.0", + "chalk": "^5.3.0", + "cli-boxes": "^3.0.0", + "string-width": "^7.2.0", + "type-fest": "^4.21.0", + "widest-line": "^5.0.0", + "wrap-ansi": "^9.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/browserslist": { + "version": "4.28.2", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.2.tgz", + "integrity": "sha512-48xSriZYYg+8qXna9kwqjIVzuQxi+KYWp2+5nCYnYKPTr0LvD89Jqk2Or5ogxz0NUMfIjhh2lIUX/LyX9B4oIg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "baseline-browser-mapping": "^2.10.12", + "caniuse-lite": "^1.0.30001782", + "electron-to-chromium": "^1.5.328", + "node-releases": "^2.0.36", + "update-browserslist-db": "^1.2.3" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/camelcase": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-8.0.0.tgz", + "integrity": "sha512-8WB3Jcas3swSvjIeA2yvCJ+Miyz5l1ZmB6HFb9R1317dt9LCQoswg/BGrmAmkWVEszSrrg4RwmO46qIm2OEnSA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001787", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001787.tgz", + "integrity": "sha512-mNcrMN9KeI68u7muanUpEejSLghOKlVhRqS/Za2IeyGllJ9I9otGpR9g3nsw7n4W378TE/LyIteA0+/FOZm4Kg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ] + }, + "node_modules/ccount": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz", + "integrity": 
"sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chalk": { + "version": "5.6.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", + "integrity": "sha512-7NzBL0rN6fMUW+f7A6Io4h40qQlG+xGmtMxfbnH/K7TAtt8JQWVQK+6g0UXKMeVJoyV5EkkNsErQ8pVD3bLHbA==", + "engines": { + "node": "^12.17.0 || ^14.13 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/character-entities": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz", + "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-html4": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-2.1.0.tgz", + "integrity": "sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-3.0.0.tgz", + "integrity": "sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-2.0.1.tgz", + "integrity": "sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chokidar": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-5.0.0.tgz", + "integrity": "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==", + "dependencies": { + "readdirp": "^5.0.0" + }, + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/ci-info": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-4.4.0.tgz", + "integrity": "sha512-77PSwercCZU2Fc4sX94eF8k8Pxte6JAwL4/ICZLFjJLqegs7kCuAsqqj/70NQF6TvDpgFjkubQB2FW2ZZddvQg==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-boxes": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz", + "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/collapse-white-space": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-2.1.0.tgz", + "integrity": "sha512-loKTxY1zCOuG4j9f6EPnuyyYkf58RnhhWTvRoZEokgB+WbdXehfjFviyOVYkqzEWz1Q5kRiZdBYS5SwxbQYwzw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/color": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/color/-/color-4.2.3.tgz", + "integrity": "sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A==", + "dependencies": { + "color-convert": "^2.0.1", + "color-string": "^1.9.0" + }, + "engines": { + "node": ">=12.5.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/color-string": { + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/color-string/-/color-string-1.9.1.tgz", + "integrity": "sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg==", + "dependencies": { + "color-name": "^1.0.0", + "simple-swizzle": "^0.2.2" + } + }, + "node_modules/comma-separated-tokens": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz", + "integrity": "sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/common-ancestor-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/common-ancestor-path/-/common-ancestor-path-1.0.1.tgz", + "integrity": "sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w==" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==" + }, + "node_modules/cookie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-1.1.1.tgz", + "integrity": "sha512-ei8Aos7ja0weRpFzJnEA9UHJ/7XQmqglbRwnf2ATjcB9Wq874VKH9kfjjirM6UhU2/E5fFYadylyhFldcqSidQ==", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" + } + }, + "node_modules/cookie-es": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/cookie-es/-/cookie-es-1.2.3.tgz", + "integrity": "sha512-lXVyvUvrNXblMqzIRrxHb57UUVmqsSWlxqt3XIjCkUP0wDAf6uicO6KMbEgYrMNtEvWgWHwe42CKxPu9MYAnWw==" + }, + "node_modules/crossws": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/crossws/-/crossws-0.3.5.tgz", + "integrity": "sha512-ojKiDvcmByhwa8YYqbQI/hg7MEU0NC03+pSdEq4ZUnZR9xXpwk7E43SMNGkn+JxJGPFtNvQ48+vV2p+P1ml5PA==", + "dependencies": { + "uncrypto": "^0.1.3" + } + }, + "node_modules/css-selector-parser": { + "version": "3.3.0", + "resolved": 
"https://registry.npmjs.org/css-selector-parser/-/css-selector-parser-3.3.0.tgz", + "integrity": "sha512-Y2asgMGFqJKF4fq4xHDSlFYIkeVfRsm69lQC1q9kbEsH5XtnINTMrweLkjYMeaUgiXBy/uvKeO/a1JHTNnmB2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/mdevils" + }, + { + "type": "patreon", + "url": "https://patreon.com/mdevils" + } + ] + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decode-named-character-reference": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.3.0.tgz", + "integrity": "sha512-GtpQYB283KrPp6nRw50q3U9/VfOutZOe103qlN7BPP6Ad27xYnOIWv4lPzo8HCAL+mMZofJ9KEy30fq6MfaK6Q==", + "dependencies": { + "character-entities": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/defu": { + "version": "6.1.7", + "resolved": "https://registry.npmjs.org/defu/-/defu-6.1.7.tgz", + "integrity": "sha512-7z22QmUWiQ/2d0KkdYmANbRUVABpZ9SNYyH5vx6PZ+nE5bcC0l7uFvEfHlyld/HcGBFTL536ClDt3DEcSlEJAQ==" + }, + "node_modules/dequal": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz", + "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/destr": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/destr/-/destr-2.0.5.tgz", + "integrity": "sha512-ugFTXCtDZunbzasqBxrK93Ik/DRYsO6S/fedkWEMKqt04xZ4csmnmwGDBAb07QWNaGMAmnTIemsYZCksjATwsA==" + }, + "node_modules/detect-libc": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", + "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/deterministic-object-hash": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/deterministic-object-hash/-/deterministic-object-hash-2.0.2.tgz", + "integrity": "sha512-KxektNH63SrbfUyDiwXqRb1rLwKt33AmMv+5Nhsw1kqZ13SJBRTgZHtGbE+hH3a1mVW1cz+4pqSWVPAtLVXTzQ==", + "dependencies": { + "base-64": "^1.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/devalue": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/devalue/-/devalue-5.7.1.tgz", + "integrity": "sha512-MUbZ586EgQqdRnC4yDrlod3BEdyvE4TapGYHMW2CiaW+KkkFmWEFqBUaLltEZCGi0iFXCEjRF0OjF0DV2QHjOA==" + }, + "node_modules/devlop": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/devlop/-/devlop-1.1.0.tgz", + "integrity": 
"sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==", + "dependencies": { + "dequal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/diff": { + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/diff/-/diff-5.2.2.tgz", + "integrity": "sha512-vtcDfH3TOjP8UekytvnHH1o1P4FcUdt4eQ1Y+Abap1tk/OB2MWQvcwS2ClCd1zuIhc3JKOx6p3kod8Vfys3E+A==", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/direction": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/direction/-/direction-2.0.1.tgz", + "integrity": "sha512-9S6m9Sukh1cZNknO1CWAr2QAWsbKLafQiyM5gZ7VgXHeuaoUwffKN4q6NC4A/Mf9iiPlOXQEKW/Mv/mh9/3YFA==", + "bin": { + "direction": "cli.js" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==" + }, + "node_modules/dset": { + "version": "3.1.4", + "resolved": "https://registry.npmjs.org/dset/-/dset-3.1.4.tgz", + "integrity": "sha512-2QF/g9/zTaPDc3BjNcVTGoBbXBgYfMTTceLaYcFJ/W9kggFUkhxD/hMEeuLKbugyef9SqAx8cpgwlIP/jinUTA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.334", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.334.tgz", + "integrity": "sha512-mgjZAz7Jyx1SRCwEpy9wefDS7GvNPazLthHg8eQMJ76wBdGQQDW33TCrUTvQ4wzpmOrv2zrFoD3oNufMdyMpog==" + }, + "node_modules/emoji-regex": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.6.0.tgz", + "integrity": "sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==" + }, + "node_modules/emoji-regex-xs": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex-xs/-/emoji-regex-xs-1.0.0.tgz", + "integrity": "sha512-LRlerrMYoIDrT6jgpeZ2YYl/L8EulRTt5hQcYjy5AInh7HWXKimpqx68aknBFpGL2+/IcogTcaydJEgaTmOpDg==" + }, + "node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==" + }, + "node_modules/esast-util-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/esast-util-from-estree/-/esast-util-from-estree-2.0.0.tgz", + "integrity": "sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esast-util-from-js": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/esast-util-from-js/-/esast-util-from-js-2.0.1.tgz", + "integrity": 
"sha512-8Ja+rNJ0Lt56Pcf3TAmpBZjmx8ZcK5Ts4cAzIOjsjevg9oSXJnl6SUQ2EevU8tv3h6ZLWmoKL5H4fgWvdvfETw==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "acorn": "^8.0.0", + "esast-util-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/esbuild": { + "version": "0.25.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.25.12.tgz", + "integrity": "sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=18" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.25.12", + "@esbuild/android-arm": "0.25.12", + "@esbuild/android-arm64": "0.25.12", + "@esbuild/android-x64": "0.25.12", + "@esbuild/darwin-arm64": "0.25.12", + "@esbuild/darwin-x64": "0.25.12", + "@esbuild/freebsd-arm64": "0.25.12", + "@esbuild/freebsd-x64": "0.25.12", + "@esbuild/linux-arm": "0.25.12", + "@esbuild/linux-arm64": "0.25.12", + "@esbuild/linux-ia32": "0.25.12", + "@esbuild/linux-loong64": "0.25.12", + "@esbuild/linux-mips64el": "0.25.12", + "@esbuild/linux-ppc64": "0.25.12", + "@esbuild/linux-riscv64": "0.25.12", + "@esbuild/linux-s390x": "0.25.12", + "@esbuild/linux-x64": "0.25.12", + "@esbuild/netbsd-arm64": "0.25.12", + "@esbuild/netbsd-x64": "0.25.12", + "@esbuild/openbsd-arm64": "0.25.12", + "@esbuild/openbsd-x64": "0.25.12", + "@esbuild/openharmony-arm64": "0.25.12", + "@esbuild/sunos-x64": "0.25.12", + "@esbuild/win32-arm64": "0.25.12", + "@esbuild/win32-ia32": "0.25.12", + "@esbuild/win32-x64": "0.25.12" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz", + "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/estree-util-attach-comments": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-attach-comments/-/estree-util-attach-comments-3.0.0.tgz", + "integrity": "sha512-cKUwm/HUcTDsYh/9FgnuFqpfquUbwIqwKM26BVCGDPVgvaCl/nDCCjUfiLlx6lsEZ3Z4RFxNbOQ60pkaEwFxGw==", + "dependencies": { + "@types/estree": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-build-jsx": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/estree-util-build-jsx/-/estree-util-build-jsx-3.0.1.tgz", + "integrity": "sha512-8U5eiL6BTrPxp/CHbs2yMgP8ftMhR5ww1eIKoWRMlqvltHF8fZn5LRDvTKuxD3DUn+shRbLGqXemcP51oFCsGQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "estree-walker": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-is-identifier-name": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/estree-util-is-identifier-name/-/estree-util-is-identifier-name-3.0.0.tgz", + "integrity": 
"sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-scope": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/estree-util-scope/-/estree-util-scope-1.0.0.tgz", + "integrity": "sha512-2CAASclonf+JFWBNJPndcOpA8EMJwa0Q8LUFJEKqXLW6+qBvbFZuF5gItbQOs/umBUkjviCSDCbBwU2cXbmrhQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-to-js": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-to-js/-/estree-util-to-js-2.0.0.tgz", + "integrity": "sha512-WDF+xj5rRWmD5tj6bIqRi6CkLIXbbNQUcxQHzGysQzvHmdYG2G7p/Tf0J0gpxGgkeMZNTIjT/AoSvC9Xehcgdg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "astring": "^1.8.0", + "source-map": "^0.7.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-util-visit": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/estree-util-visit/-/estree-util-visit-2.0.0.tgz", + "integrity": "sha512-m5KgiH85xAhhW8Wta0vShLcUvOsh3LLPI2YVwcbio1l7E09NTLL1EyMZFM1OyWowoH0skScNbhOPl4kcBgzTww==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/eventemitter3": { + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.4.tgz", + "integrity": "sha512-mlsTRyGaPBjPedk6Bvw+aqbsXDtoAyAzm5MO7JgU+yVRyMQ5O8bD4Kcci7BS85f93veegeCPkL8R4GLClnjLFw==" + }, + "node_modules/expressive-code": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/expressive-code/-/expressive-code-0.40.2.tgz", + "integrity": "sha512-1zIda2rB0qiDZACawzw2rbdBQiWHBT56uBctS+ezFe5XMAaFaHLnnSYND/Kd+dVzO9HfCXRDpzH3d+3fvOWRcw==", + "dependencies": { + "@expressive-code/core": "^0.40.2", + "@expressive-code/plugin-frames": "^0.40.2", + "@expressive-code/plugin-shiki": "^0.40.2", + "@expressive-code/plugin-text-markers": "^0.40.2" + } + }, + "node_modules/extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/flattie": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/flattie/-/flattie-1.1.1.tgz", + "integrity": "sha512-9UbaD6XdAL97+k/n+N7JwX46K/M6Zc6KcFYskrYL8wbBV/Uyk0CTAMY0VT+qiK5PM7AIc9aTWYtq65U7T+aCNQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/fsevents": { + "version": 
"2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "hasInstallScript": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-east-asian-width": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.5.0.tgz", + "integrity": "sha512-CQ+bEO+Tva/qlmw24dCejulK5pMzVnUOFOijVogd3KQs07HnRIgp8TGipvCCRT06xeYEbpbgwaCxglFyiuIcmA==", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-slugger": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz", + "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw==" + }, + "node_modules/h3": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/h3/-/h3-1.15.11.tgz", + "integrity": "sha512-L3THSe2MPeBwgIZVSH5zLdBBU90TOxarvhK9d04IDY2AmVS8j2Jz2LIWtwsGOU3lu2I5jCN7FNvVfY2+XyF+mg==", + "dependencies": { + "cookie-es": "^1.2.3", + "crossws": "^0.3.5", + "defu": "^6.1.6", + "destr": "^2.0.5", + "iron-webcrypto": "^1.2.1", + "node-mock-http": "^1.0.4", + "radix3": "^1.1.2", + "ufo": "^1.6.3", + "uncrypto": "^0.1.3" + } + }, + "node_modules/hast-util-embedded": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-embedded/-/hast-util-embedded-3.0.0.tgz", + "integrity": "sha512-naH8sld4Pe2ep03qqULEtvYr7EjrLK2QHY8KJR6RJkTUjPGObe1vnx585uzem2hGra+s1q08DZZpfgDVYRbaXA==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-is-element": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-format": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/hast-util-format/-/hast-util-format-1.1.0.tgz", + "integrity": "sha512-yY1UDz6bC9rDvCWHpx12aIBGRG7krurX0p0Fm6pT547LwDIZZiNr8a+IHDogorAdreULSEzP82Nlv5SZkHZcjA==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-embedded": "^3.0.0", + "hast-util-minify-whitespace": "^1.0.0", + "hast-util-phrasing": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "html-whitespace-sensitive-tag-names": "^3.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-html": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/hast-util-from-html/-/hast-util-from-html-2.0.3.tgz", + "integrity": "sha512-CUSRHXyKjzHov8yKsQjGOElXy/3EKpyX56ELnkHH34vDVw1N1XSQ1ZcAvTyAPtGqLTuKP/uxM+aLkSPqF/EtMw==", + "dependencies": { + "@types/hast": "^3.0.0", + "devlop": "^1.1.0", + "hast-util-from-parse5": "^8.0.0", + "parse5": "^7.0.0", + "vfile": "^6.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-from-parse5": { + "version": "8.0.3", + "resolved": 
"https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-8.0.3.tgz", + "integrity": "sha512-3kxEVkEKt0zvcZ3hCRYI8rqrgwtlIOFMWkbclACvjlDw8Li9S2hk/d51OI0nr/gIpdMHNepwgOKqZ/sy0Clpyg==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "hastscript": "^9.0.0", + "property-information": "^7.0.0", + "vfile": "^6.0.0", + "vfile-location": "^5.0.0", + "web-namespaces": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-has-property": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-has-property/-/hast-util-has-property-3.0.0.tgz", + "integrity": "sha512-MNilsvEKLFpV604hwfhVStK0usFY/QmM5zX16bo7EjnAEGofr5YyI37kzopBlZJkHD4t887i+q/C8/tr5Q94cA==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-body-ok-link": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hast-util-is-body-ok-link/-/hast-util-is-body-ok-link-3.0.1.tgz", + "integrity": "sha512-0qpnzOBLztXHbHQenVB8uNuxTnm/QBFUOmdOSsEn7GnBtyY07+ENTWVFBAnXd/zEgd9/SUG3lRY7hSIBWRgGpQ==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-is-element": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-is-element/-/hast-util-is-element-3.0.0.tgz", + "integrity": "sha512-Val9mnv2IWpLbNPqc/pUem+a7Ipj2aHacCwgNfTiK0vJKl0LF+4Ba4+v1oPHFpf3bLYmreq0/l3Gud9S5OH42g==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-minify-whitespace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/hast-util-minify-whitespace/-/hast-util-minify-whitespace-1.0.1.tgz", + "integrity": "sha512-L96fPOVpnclQE0xzdWb/D12VT5FabA7SnZOUMtL1DbXmYiHJMXZvFkIZfiMmTCNJHUeO2K9UYNXoVyfz+QHuOw==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-embedded": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-4.0.0.tgz", + "integrity": "sha512-wkQCkSYoOGCRKERFWcxMVMOcYE2K1AaNLU8DXS9arxnLOUEWbOXKXiJUNzEpqZ3JOKpnha3jkFrumEjVliDe7A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-phrasing": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hast-util-phrasing/-/hast-util-phrasing-3.0.1.tgz", + "integrity": "sha512-6h60VfI3uBQUxHqTyMymMZnEbNl1XmEGtOxxKYL7stY2o601COo62AWAYBQR9lZbYXYSBoxag8UpPRXK+9fqSQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-embedded": "^3.0.0", + "hast-util-has-property": "^3.0.0", + "hast-util-is-body-ok-link": "^3.0.0", + "hast-util-is-element": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-raw": { + "version": "9.1.0", + "resolved": 
"https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-9.1.0.tgz", + "integrity": "sha512-Y8/SBAHkZGoNkpzqqfCldijcuUKh7/su31kEBp67cFY09Wy0mTRgtsLYsiIxMJxlu0f6AA5SUTbDR8K0rxnbUw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "@ungap/structured-clone": "^1.0.0", + "hast-util-from-parse5": "^8.0.0", + "hast-util-to-parse5": "^8.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "parse5": "^7.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-select": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/hast-util-select/-/hast-util-select-6.0.4.tgz", + "integrity": "sha512-RqGS1ZgI0MwxLaKLDxjprynNzINEkRHY2i8ln4DDjgv9ZhcYVIHN9rlpiYsqtFwrgpYU361SyWDQcGNIBVu3lw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "bcp-47-match": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "css-selector-parser": "^3.0.0", + "devlop": "^1.0.0", + "direction": "^2.0.0", + "hast-util-has-property": "^3.0.0", + "hast-util-to-string": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "nth-check": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-estree": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/hast-util-to-estree/-/hast-util-to-estree-3.1.3.tgz", + "integrity": "sha512-48+B/rJWAp0jamNbAAf9M7Uf//UVqAoMmgXhBdxTDJLGKY+LRnZ99qcG+Qjl5HfMpYNzS5v4EAwVEF34LeAj7w==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-attach-comments": "^3.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-html": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/hast-util-to-html/-/hast-util-to-html-9.0.5.tgz", + "integrity": "sha512-OguPdidb+fbHQSU4Q4ZiLKnzWo8Wwsf5bZfbvu7//a9oTYoqD/fWpe96NuHkoS9h0ccGOTe0C4NGXdtS0iObOw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-whitespace": "^3.0.0", + "html-void-elements": "^3.0.0", + "mdast-util-to-hast": "^13.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "stringify-entities": "^4.0.0", + "zwitch": "^2.0.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-jsx-runtime": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/hast-util-to-jsx-runtime/-/hast-util-to-jsx-runtime-2.3.6.tgz", + "integrity": "sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==", + "dependencies": { + "@types/estree": 
"^1.0.0", + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "hast-util-whitespace": "^3.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "style-to-js": "^1.0.0", + "unist-util-position": "^5.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-parse5": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-8.0.1.tgz", + "integrity": "sha512-MlWT6Pjt4CG9lFCjiz4BH7l9wmrMkfkJYCxFwKQic8+RTZgWPuWxwAfjJElsXkex7DJjfSJsQIt931ilUgmwdA==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "devlop": "^1.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0", + "web-namespaces": "^2.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-string": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/hast-util-to-string/-/hast-util-to-string-3.0.1.tgz", + "integrity": "sha512-XelQVTDWvqcl3axRfI0xSeoVKzyIFPwsAGSLIsKdJKQMXDYJS4WYrBNF/8J7RdhIcFI2BOHgAifggsvsxp/3+A==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-to-text": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/hast-util-to-text/-/hast-util-to-text-4.0.2.tgz", + "integrity": "sha512-KK6y/BN8lbaq654j7JgBydev7wuNMcID54lkRav1P0CaE1e47P72AWWPiGKXTJU271ooYzcvTAn/Zt0REnvc7A==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/unist": "^3.0.0", + "hast-util-is-element": "^3.0.0", + "unist-util-find-after": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hast-util-whitespace": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/hast-util-whitespace/-/hast-util-whitespace-3.0.0.tgz", + "integrity": "sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==", + "dependencies": { + "@types/hast": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-9.0.1.tgz", + "integrity": "sha512-g7df9rMFX/SPi34tyGCyUBREQoKkapwdY/T04Qn9TDWfHhAYt4/I0gMVirzK5wEzeUqIjEB+LXC/ypb7Aqno5w==", + "dependencies": { + "@types/hast": "^3.0.0", + "comma-separated-tokens": "^2.0.0", + "hast-util-parse-selector": "^4.0.0", + "property-information": "^7.0.0", + "space-separated-tokens": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/html-escaper": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-3.0.3.tgz", + "integrity": "sha512-RuMffC89BOWQoY0WKGpIhn5gX3iI54O6nRA0yC124NYVtzjmFWBIiFd8M0x+ZdX0P9R4lADg1mgP8C7PxGOWuQ==" + }, + "node_modules/html-void-elements": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-3.0.0.tgz", + "integrity": 
"sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/html-whitespace-sensitive-tag-names": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/html-whitespace-sensitive-tag-names/-/html-whitespace-sensitive-tag-names-3.0.1.tgz", + "integrity": "sha512-q+310vW8zmymYHALr1da4HyXUQ0zgiIwIicEfotYPWGN0OJVEN/58IJ3A4GBYcEq3LGAZqKb+ugvP0GNB9CEAA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz", + "integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ==" + }, + "node_modules/i18next": { + "version": "23.16.8", + "resolved": "https://registry.npmjs.org/i18next/-/i18next-23.16.8.tgz", + "integrity": "sha512-06r/TitrM88Mg5FdUXAKL96dJMzgqLE5dv3ryBAra4KCwD9mJ4ndOTS95ZuymIGoE+2hzfdaMak2X11/es7ZWg==", + "funding": [ + { + "type": "individual", + "url": "https://locize.com" + }, + { + "type": "individual", + "url": "https://locize.com/i18next.html" + }, + { + "type": "individual", + "url": "https://www.i18next.com/how-to/faq#i18next-is-awesome.-how-can-i-support-the-project" + } + ], + "dependencies": { + "@babel/runtime": "^7.23.2" + } + }, + "node_modules/import-meta-resolve": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/import-meta-resolve/-/import-meta-resolve-4.2.0.tgz", + "integrity": "sha512-Iqv2fzaTQN28s/FwZAoFq0ZSs/7hMAHJVX+w8PZl3cY19Pxk6jFFalxQoIfW2826i/fDLXv8IiEZRIT0lDuWcg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/inline-style-parser": { + "version": "0.2.7", + "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.2.7.tgz", + "integrity": "sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==" + }, + "node_modules/iron-webcrypto": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/iron-webcrypto/-/iron-webcrypto-1.2.1.tgz", + "integrity": "sha512-feOM6FaSr6rEABp/eDfVseKyTMDt+KGpeB35SkVn9Tyn0CqvVsY3EwI0v5i8nMHyJnzCIQf7nsy3p41TPkJZhg==", + "funding": { + "url": "https://github.com/sponsors/brc-dd" + } + }, + "node_modules/is-alphabetical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-2.0.1.tgz", + "integrity": "sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-2.0.1.tgz", + "integrity": "sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==", + "dependencies": { + "is-alphabetical": "^2.0.0", + "is-decimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-arrayish": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.3.4.tgz", + "integrity": "sha512-m6UrgzFVUYawGBh1dUsWR5M2Clqic9RVXC/9f8ceNlv2IcO9j9J/z8UoCLPqtsPBFNzEpfR3xftohbfqDx8EQA==" + }, + "node_modules/is-decimal": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/is-decimal/-/is-decimal-2.0.1.tgz", + "integrity": "sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-3.0.0.tgz", + "integrity": "sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-hexadecimal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-2.0.1.tgz", + "integrity": "sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-inside-container": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-inside-container/-/is-inside-container-1.0.0.tgz", + "integrity": "sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==", + "dependencies": { + "is-docker": "^3.0.0" + }, + "bin": { + "is-inside-container": "cli.js" + }, + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-plain-obj": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz", + "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-wsl": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz", + "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==", + "dependencies": { + "is-inside-container": "^1.0.0" + }, + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", 
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/kleur": { + "version": "4.1.5", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz", + "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==", + "engines": { + "node": ">=6" + } + }, + "node_modules/klona": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz", + "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/longest-streak": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz", + "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/loose-envify": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz", + "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==", + "dependencies": { + "js-tokens": "^3.0.0 || ^4.0.0" + }, + "bin": { + "loose-envify": "cli.js" + } + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/magicast": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", + "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", + "dependencies": { + "@babel/parser": "^7.25.4", + "@babel/types": "^7.25.4", + "source-map-js": "^1.2.0" + } + }, + "node_modules/markdown-extensions": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-2.0.0.tgz", + "integrity": "sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/markdown-table": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.4.tgz", + "integrity": "sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/mdast-util-definitions": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-6.0.0.tgz", + "integrity": "sha512-scTllyX6pnYNZH/AIp/0ePz6s4cZtARxImwoPJ7kS42n+MnVsI4XbnG6d4ibehRIldYMWM2LD7ImQblVhUejVQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + 
"@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-directive": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-directive/-/mdast-util-directive-3.1.0.tgz", + "integrity": "sha512-I3fNFt+DHmpWCYAT7quoM6lHf9wuqtI+oCOfvILnoicNIqjh5E3dEJWiXuYME2gNe8vl1iMQwyUHa7bgFmak6Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-find-and-replace": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-3.0.2.tgz", + "integrity": "sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "escape-string-regexp": "^5.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-from-markdown": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-2.0.3.tgz", + "integrity": "sha512-W4mAWTvSlKvf8L6J+VN9yLSqQ9AOAAvHuoDAmPkz4dHf553m5gVj2ejadHJhoJmcmxEnOv6Pa8XJhpxE93kb8Q==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark": "^4.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-3.1.0.tgz", + "integrity": "sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-gfm-autolink-literal": "^2.0.0", + "mdast-util-gfm-footnote": "^2.0.0", + "mdast-util-gfm-strikethrough": "^2.0.0", + "mdast-util-gfm-table": "^2.0.0", + "mdast-util-gfm-task-list-item": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-autolink-literal": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-2.0.1.tgz", + "integrity": "sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "ccount": "^2.0.0", + "devlop": "^1.0.0", + "mdast-util-find-and-replace": "^3.0.0", + "micromark-util-character": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + 
"node_modules/mdast-util-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-strikethrough": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-2.0.0.tgz", + "integrity": "sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-table": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-2.0.0.tgz", + "integrity": "sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "markdown-table": "^3.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-gfm-task-list-item": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-2.0.0.tgz", + "integrity": "sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==", + "dependencies": { + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx/-/mdast-util-mdx-3.0.0.tgz", + "integrity": "sha512-JfbYLAW7XnYTTbUsmpu0kdBUVe+yKVJZBItEjwyYJiDJuZ9w4eeaqks4HQO+R7objWgS2ymV60GYpI14Ug554w==", + "dependencies": { + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-mdx-expression": "^2.0.0", + "mdast-util-mdx-jsx": "^3.0.0", + "mdast-util-mdxjs-esm": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-expression": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-expression/-/mdast-util-mdx-expression-2.0.1.tgz", + "integrity": "sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdx-jsx": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/mdast-util-mdx-jsx/-/mdast-util-mdx-jsx-3.2.0.tgz", + "integrity": 
"sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "ccount": "^2.0.0", + "devlop": "^1.1.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0", + "parse-entities": "^4.0.0", + "stringify-entities": "^4.0.0", + "unist-util-stringify-position": "^4.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-mdxjs-esm": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mdast-util-mdxjs-esm/-/mdast-util-mdxjs-esm-2.0.1.tgz", + "integrity": "sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==", + "dependencies": { + "@types/estree-jsx": "^1.0.0", + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "devlop": "^1.0.0", + "mdast-util-from-markdown": "^2.0.0", + "mdast-util-to-markdown": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-phrasing": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-phrasing/-/mdast-util-phrasing-4.1.0.tgz", + "integrity": "sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==", + "dependencies": { + "@types/mdast": "^4.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-hast": { + "version": "13.2.1", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.2.1.tgz", + "integrity": "sha512-cctsq2wp5vTsLIcaymblUriiTcZd0CwWtCbLvrOzYCDZoWyMNV8sZ7krj09FSnsiJi3WVsHLM4k6Dq/yaPyCXA==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "@ungap/structured-clone": "^1.0.0", + "devlop": "^1.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "trim-lines": "^3.0.0", + "unist-util-position": "^5.0.0", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-markdown": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-2.1.2.tgz", + "integrity": "sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "@types/unist": "^3.0.0", + "longest-streak": "^3.0.0", + "mdast-util-phrasing": "^4.0.0", + "mdast-util-to-string": "^4.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-decode-string": "^2.0.0", + "unist-util-visit": "^5.0.0", + "zwitch": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/mdast-util-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-4.0.0.tgz", + "integrity": "sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==", + "dependencies": { + "@types/mdast": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark": { + "version": "4.0.2", + "resolved": 
"https://registry.npmjs.org/micromark/-/micromark-4.0.2.tgz", + "integrity": "sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/debug": "^4.0.0", + "debug": "^4.0.0", + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-core-commonmark": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-2.0.3.tgz", + "integrity": "sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-destination": "^2.0.0", + "micromark-factory-label": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-title": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-html-tag-name": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-subtokenize": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-directive": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-directive/-/micromark-extension-directive-3.0.2.tgz", + "integrity": "sha512-wjcXHgk+PPdmvR58Le9d7zQYWy+vKEU9Se44p2CrCDPiLr2FMyiT4Fyb5UFKFC66wGB3kPlgD7q3TnoqPS7SZA==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-factory-whitespace": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "parse-entities": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-3.0.0.tgz", + "integrity": "sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==", + "dependencies": { + "micromark-extension-gfm-autolink-literal": "^2.0.0", + "micromark-extension-gfm-footnote": "^2.0.0", + "micromark-extension-gfm-strikethrough": "^2.0.0", + "micromark-extension-gfm-table": "^2.0.0", + "micromark-extension-gfm-tagfilter": "^2.0.0", + "micromark-extension-gfm-task-list-item": "^2.0.0", + "micromark-util-combine-extensions": 
"^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-autolink-literal": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-2.1.0.tgz", + "integrity": "sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==", + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-footnote": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-2.1.0.tgz", + "integrity": "sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-normalize-identifier": "^2.0.0", + "micromark-util-sanitize-uri": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-strikethrough": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-2.1.0.tgz", + "integrity": "sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-classify-character": "^2.0.0", + "micromark-util-resolve-all": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-table": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-2.1.1.tgz", + "integrity": "sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-tagfilter": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-2.0.0.tgz", + "integrity": "sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-gfm-task-list-item": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-2.1.0.tgz", + "integrity": 
"sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==", + "dependencies": { + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-expression": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-expression/-/micromark-extension-mdx-expression-3.0.1.tgz", + "integrity": "sha512-dD/ADLJ1AeMvSAKBwO22zG22N4ybhe7kFIZ3LsDI0GlsNr2A3KYxb0LdC1u5rj4Nw+CHKY0RVdnHX8vj8ejm4Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-extension-mdx-jsx": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-jsx/-/micromark-extension-mdx-jsx-3.0.2.tgz", + "integrity": "sha512-e5+q1DjMh62LZAJOnDraSSbDMvGJ8x3cbjygy2qFEi7HCeUT4BDKCvMozPozcD6WmOt6sVvYDNBKhFSz3kjOVQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "estree-util-is-identifier-name": "^3.0.0", + "micromark-factory-mdx-expression": "^2.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdx-md": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdx-md/-/micromark-extension-mdx-md-2.0.0.tgz", + "integrity": "sha512-EpAiszsB3blw4Rpba7xTOUptcFeBFi+6PY8VnJ2hhimH+vCQDirWgsMpz7w1XcZE7LVrSAUGb9VJpG9ghlYvYQ==", + "dependencies": { + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs/-/micromark-extension-mdxjs-3.0.0.tgz", + "integrity": "sha512-A873fJfhnJ2siZyUrJ31l34Uqwy4xIFmvPY1oj+Ean5PHcPBYzEsvqvWGaWcfEIr11O5Dlw3p2y0tZWpKHDejQ==", + "dependencies": { + "acorn": "^8.0.0", + "acorn-jsx": "^5.0.0", + "micromark-extension-mdx-expression": "^3.0.0", + "micromark-extension-mdx-jsx": "^3.0.0", + "micromark-extension-mdx-md": "^2.0.0", + "micromark-extension-mdxjs-esm": "^3.0.0", + "micromark-util-combine-extensions": "^2.0.0", + "micromark-util-types": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-extension-mdxjs-esm": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/micromark-extension-mdxjs-esm/-/micromark-extension-mdxjs-esm-3.0.0.tgz", + "integrity": "sha512-DJFl4ZqkErRpq/dAPyeWp15tGrcrrJho1hKK5uBS70BCtfrIFg81sqcTVu3Ta+KD1Tk5vAtBNElWxtAa+m8K9A==", + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": 
"^1.0.0", + "micromark-core-commonmark": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/micromark-factory-destination": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-2.0.1.tgz", + "integrity": "sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-label": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-2.0.1.tgz", + "integrity": "sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-mdx-expression": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-factory-mdx-expression/-/micromark-factory-mdx-expression-2.0.3.tgz", + "integrity": "sha512-kQnEtA3vzucU2BkrIa8/VaSAsP+EJ3CKOvhMuJgOEGg9KDC6OAY6nSnNDVRiVNRqj7Y4SlSzcStaH/5jge8JdQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "devlop": "^1.0.0", + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-events-to-acorn": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unist-util-position-from-estree": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-factory-space": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-2.0.1.tgz", + "integrity": "sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-title": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-2.0.1.tgz", + "integrity": "sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } 
+ ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-factory-whitespace": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-2.0.1.tgz", + "integrity": "sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-factory-space": "^2.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-character": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-2.1.1.tgz", + "integrity": "sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-chunked": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-2.0.1.tgz", + "integrity": "sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-classify-character": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-2.0.1.tgz", + "integrity": "sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-combine-extensions": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-2.0.1.tgz", + "integrity": "sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-chunked": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-numeric-character-reference": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-2.0.2.tgz", + "integrity": 
"sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-decode-string": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-2.0.1.tgz", + "integrity": "sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "decode-named-character-reference": "^1.0.0", + "micromark-util-character": "^2.0.0", + "micromark-util-decode-numeric-character-reference": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-encode": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-2.0.1.tgz", + "integrity": "sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-events-to-acorn": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/micromark-util-events-to-acorn/-/micromark-util-events-to-acorn-2.0.3.tgz", + "integrity": "sha512-jmsiEIiZ1n7X1Rr5k8wVExBQCg5jy4UXVADItHmNk1zkwEVhBuIUKRu3fqv+hs4nxLISi2DQGlqIOGiFxgbfHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "@types/estree": "^1.0.0", + "@types/unist": "^3.0.0", + "devlop": "^1.0.0", + "estree-util-visit": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0", + "vfile-message": "^4.0.0" + } + }, + "node_modules/micromark-util-html-tag-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-2.0.1.tgz", + "integrity": "sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-normalize-identifier": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-2.0.1.tgz", + "integrity": "sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-resolve-all": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-2.0.1.tgz", + "integrity": 
"sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-sanitize-uri": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-2.0.1.tgz", + "integrity": "sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "micromark-util-character": "^2.0.0", + "micromark-util-encode": "^2.0.0", + "micromark-util-symbol": "^2.0.0" + } + }, + "node_modules/micromark-util-subtokenize": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-2.1.0.tgz", + "integrity": "sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ], + "dependencies": { + "devlop": "^1.0.0", + "micromark-util-chunked": "^2.0.0", + "micromark-util-symbol": "^2.0.0", + "micromark-util-types": "^2.0.0" + } + }, + "node_modules/micromark-util-symbol": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-2.0.1.tgz", + "integrity": "sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/micromark-util-types": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-2.0.2.tgz", + "integrity": "sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==", + "funding": [ + { + "type": "GitHub Sponsors", + "url": "https://github.com/sponsors/unifiedjs" + }, + { + "type": "OpenCollective", + "url": "https://opencollective.com/unified" + } + ] + }, + "node_modules/mrmime": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-2.0.1.tgz", + "integrity": "sha512-Y3wQdFg2Va6etvQ5I82yUhGdsKrcYox6p7FfL1LbK2J4V01F9TGlepTIhnK24t7koZibmg82KGglhA1XK5IsLQ==", + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==" + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/neotraverse": { + "version": 
"0.6.18", + "resolved": "https://registry.npmjs.org/neotraverse/-/neotraverse-0.6.18.tgz", + "integrity": "sha512-Z4SmBUweYa09+o6pG+eASabEpP6QkQ70yHj351pQoEXIs8uHbaU2DWVmzBANKgflPa47A50PtB2+NgRpQvr7vA==", + "engines": { + "node": ">= 10" + } + }, + "node_modules/nlcst-to-string": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-4.0.0.tgz", + "integrity": "sha512-YKLBCcUYKAg0FNlOBT6aI91qFmSiFKiluk655WzPF+DDMA02qIyy8uiRqI8QXtcFpEvll12LpL5MXqEmAZ+dcA==", + "dependencies": { + "@types/nlcst": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/node-fetch-native": { + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/node-fetch-native/-/node-fetch-native-1.6.7.tgz", + "integrity": "sha512-g9yhqoedzIUm0nTnTqAQvueMPVOuIY16bqgAJJC8XOOubYFNwz6IER9qs0Gq2Xd0+CecCKFjtdDTMA4u4xG06Q==" + }, + "node_modules/node-mock-http": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/node-mock-http/-/node-mock-http-1.0.4.tgz", + "integrity": "sha512-8DY+kFsDkNXy1sJglUfuODx1/opAGJGyrTuFqEoN90oRc2Vk0ZbD4K2qmKXBBEhZQzdKHIVfEJpDU8Ak2NJEvQ==" + }, + "node_modules/node-releases": { + "version": "2.0.37", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.37.tgz", + "integrity": "sha512-1h5gKZCF+pO/o3Iqt5Jp7wc9rH3eJJ0+nh/CIoiRwjRxde/hAHyLPXYN4V3CqKAbiZPSeJFSWHmJsbkicta0Eg==" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/ofetch": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/ofetch/-/ofetch-1.5.1.tgz", + "integrity": "sha512-2W4oUZlVaqAPAil6FUg/difl6YhqhUR7x2eZY4bQCko22UXg3hptq9KLQdqFClV+Wu85UX7hNtdGTngi/1BxcA==", + "dependencies": { + "destr": "^2.0.5", + "node-fetch-native": "^1.6.7", + "ufo": "^1.6.1" + } + }, + "node_modules/oniguruma-parser": { + "version": "0.12.1", + "resolved": "https://registry.npmjs.org/oniguruma-parser/-/oniguruma-parser-0.12.1.tgz", + "integrity": "sha512-8Unqkvk1RYc6yq2WBYRj4hdnsAxVze8i7iPfQr8e4uSP3tRv0rpZcbGUDvxfQQcdwHt/e9PrMvGCsa8OqG9X3w==" + }, + "node_modules/oniguruma-to-es": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/oniguruma-to-es/-/oniguruma-to-es-4.3.5.tgz", + "integrity": "sha512-Zjygswjpsewa0NLTsiizVuMQZbp0MDyM6lIt66OxsF21npUDlzpHi1Mgb/qhQdkb+dWFTzJmFbEWdvZgRho8eQ==", + "dependencies": { + "oniguruma-parser": "^0.12.1", + "regex": "^6.1.0", + "regex-recursion": "^6.0.2" + } + }, + "node_modules/p-limit": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-6.2.0.tgz", + "integrity": "sha512-kuUqqHNUqoIWp/c467RI4X6mmyuojY5jGutNU0wVTmEOOfcuwLqyMVoAi9MKi2Ak+5i9+nhmrK4ufZE8069kHA==", + "dependencies": { + "yocto-queue": "^1.1.1" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-queue": { + "version": "8.1.1", + 
"resolved": "https://registry.npmjs.org/p-queue/-/p-queue-8.1.1.tgz", + "integrity": "sha512-aNZ+VfjobsWryoiPnEApGGmf5WmNsCo9xu8dfaYamG5qaLP7ClhLN6NgsFe6SwJ2UbLEBK5dv9x8Mn5+RVhMWQ==", + "dependencies": { + "eventemitter3": "^5.0.1", + "p-timeout": "^6.1.2" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "6.1.4", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-6.1.4.tgz", + "integrity": "sha512-MyIV3ZA/PmyBN/ud8vV9XzwTrNtR4jFrObymZYnZqMmW0zA8Z17vnT0rBgFE/TlohB+YCHqXMgZzb3Csp49vqg==", + "engines": { + "node": ">=14.16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-manager-detector": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/package-manager-detector/-/package-manager-detector-1.6.0.tgz", + "integrity": "sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==" + }, + "node_modules/pagefind": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/pagefind/-/pagefind-1.5.0.tgz", + "integrity": "sha512-7vQ2xh0ZmjPjsuWONR68nqzb+QNfpPh7pdT6n6YDAthWAQiUkSACVegSswY5zPNONGYFWebFVgdnS5/m/Qqn+w==", + "bin": { + "pagefind": "lib/runner/bin.cjs" + }, + "optionalDependencies": { + "@pagefind/darwin-arm64": "1.5.0", + "@pagefind/darwin-x64": "1.5.0", + "@pagefind/freebsd-x64": "1.5.0", + "@pagefind/linux-arm64": "1.5.0", + "@pagefind/linux-x64": "1.5.0", + "@pagefind/windows-arm64": "1.5.0", + "@pagefind/windows-x64": "1.5.0" + } + }, + "node_modules/parse-entities": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-4.0.2.tgz", + "integrity": "sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==", + "dependencies": { + "@types/unist": "^2.0.0", + "character-entities-legacy": "^3.0.0", + "character-reference-invalid": "^2.0.0", + "decode-named-character-reference": "^1.0.0", + "is-alphanumerical": "^2.0.0", + "is-decimal": "^2.0.0", + "is-hexadecimal": "^2.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse-entities/node_modules/@types/unist": { + "version": "2.0.11", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz", + "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==" + }, + "node_modules/parse-latin": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/parse-latin/-/parse-latin-7.0.0.tgz", + "integrity": "sha512-mhHgobPPua5kZ98EF4HWiH167JWBfl4pvAIXXdbaVohtK7a6YBOy56kvhCqduqyo/f3yrHFWmqmiMg/BkBkYYQ==", + "dependencies": { + "@types/nlcst": "^2.0.0", + "@types/unist": "^3.0.0", + "nlcst-to-string": "^4.0.0", + "unist-util-modify-children": "^4.0.0", + "unist-util-visit-children": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/piccolore": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/piccolore/-/piccolore-0.1.3.tgz", + 
"integrity": "sha512-o8bTeDWjE086iwKrROaDf31K0qC/BENdm15/uH9usSC/uZjJOKb2YGiVHfLY4GhwsERiPI1jmwI2XrA7ACOxVw==" + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==" + }, + "node_modules/picomatch": { + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz", + "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/postcss": { + "version": "8.5.9", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.9.tgz", + "integrity": "sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/prismjs": { + "version": "1.30.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.30.0.tgz", + "integrity": "sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==", + "engines": { + "node": ">=6" + } + }, + "node_modules/prompts": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz", + "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==", + "dependencies": { + "kleur": "^3.0.3", + "sisteransi": "^1.0.5" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/prompts/node_modules/kleur": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz", + "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==", + "engines": { + "node": ">=6" + } + }, + "node_modules/property-information": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-7.1.0.tgz", + "integrity": 
"sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/radix3": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/radix3/-/radix3-1.1.2.tgz", + "integrity": "sha512-b484I/7b8rDEdSDKckSSBA8knMpcdsXudlE/LNL639wFoHKwLbEkQFZHWEYwDC0wa0FKUcCY+GAF73Z7wxNVFA==" + }, + "node_modules/react": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz", + "integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==", + "dependencies": { + "loose-envify": "^1.1.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/react-dom": { + "version": "18.3.1", + "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz", + "integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==", + "dependencies": { + "loose-envify": "^1.1.0", + "scheduler": "^0.23.2" + }, + "peerDependencies": { + "react": "^18.3.1" + } + }, + "node_modules/react-refresh": { + "version": "0.17.0", + "resolved": "https://registry.npmjs.org/react-refresh/-/react-refresh-0.17.0.tgz", + "integrity": "sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/readdirp": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-5.0.0.tgz", + "integrity": "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==", + "engines": { + "node": ">= 20.19.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/recma-build-jsx": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-build-jsx/-/recma-build-jsx-1.0.0.tgz", + "integrity": "sha512-8GtdyqaBcDfva+GUKDr3nev3VpKAhup1+RvkMvUxURHpW7QyIvk9F5wz7Vzo06CEMSilw6uArgRqhpiUcWp8ew==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-build-jsx": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-jsx": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/recma-jsx/-/recma-jsx-1.0.1.tgz", + "integrity": "sha512-huSIy7VU2Z5OLv6oFLosQGGDqPqdO1iq6bWNAdhzMxSJP7RAso4fCZ1cKu8j9YHCZf3TPrq4dw3okhrylgcd7w==", + "dependencies": { + "acorn-jsx": "^5.0.0", + "estree-util-to-js": "^2.0.0", + "recma-parse": "^1.0.0", + "recma-stringify": "^1.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + }, + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/recma-parse": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-parse/-/recma-parse-1.0.0.tgz", + "integrity": "sha512-OYLsIGBB5Y5wjnSnQW6t3Xg7q3fQ7FWbw/vcXtORTnyaSFscOtABg+7Pnz6YZ6c27fG1/aN8CjfwoUEUIdwqWQ==", + "dependencies": { + "@types/estree": "^1.0.0", + "esast-util-from-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/recma-stringify": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/recma-stringify/-/recma-stringify-1.0.0.tgz", + "integrity": 
"sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==", + "dependencies": { + "@types/estree": "^1.0.0", + "estree-util-to-js": "^2.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/regex/-/regex-6.1.0.tgz", + "integrity": "sha512-6VwtthbV4o/7+OaAF9I5L5V3llLEsoPyq9P1JVXkedTP33c7MfCG0/5NOPcSJn0TzXcG9YUrR0gQSWioew3LDg==", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-recursion": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/regex-recursion/-/regex-recursion-6.0.2.tgz", + "integrity": "sha512-0YCaSCq2VRIebiaUviZNs0cBz1kg5kVS2UKUfNIx8YVs1cN3AV7NTctO5FOKBA+UT2BPJIWZauYHPqJODG50cg==", + "dependencies": { + "regex-utilities": "^2.3.0" + } + }, + "node_modules/regex-utilities": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/regex-utilities/-/regex-utilities-2.3.0.tgz", + "integrity": "sha512-8VhliFJAWRaUiVvREIiW2NXXTmHs4vMNnSzuJVhscgmGav3g9VDxLrQndI3dZZVVdp0ZO/5v0xmX516/7M9cng==" + }, + "node_modules/rehype": { + "version": "13.0.2", + "resolved": "https://registry.npmjs.org/rehype/-/rehype-13.0.2.tgz", + "integrity": "sha512-j31mdaRFrwFRUIlxGeuPXXKWQxet52RBQRvCmzl5eCefn/KGbomK5GMHNMsOJf55fgo3qw5tST5neDuarDYR2A==", + "dependencies": { + "@types/hast": "^3.0.0", + "rehype-parse": "^9.0.0", + "rehype-stringify": "^10.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-expressive-code": { + "version": "0.40.2", + "resolved": "https://registry.npmjs.org/rehype-expressive-code/-/rehype-expressive-code-0.40.2.tgz", + "integrity": "sha512-+kn+AMGCrGzvtH8Q5lC6Y5lnmTV/r33fdmi5QU/IH1KPHKobKr5UnLwJuqHv5jBTSN/0v2wLDS7RTM73FVzqmQ==", + "dependencies": { + "expressive-code": "^0.40.2" + } + }, + "node_modules/rehype-format": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/rehype-format/-/rehype-format-5.0.1.tgz", + "integrity": "sha512-zvmVru9uB0josBVpr946OR8ui7nJEdzZobwLOOqHb/OOD88W0Vk2SqLwoVOj0fM6IPCCO6TaV9CvQvJMWwukFQ==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-format": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-parse": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/rehype-parse/-/rehype-parse-9.0.1.tgz", + "integrity": "sha512-ksCzCD0Fgfh7trPDxr2rSylbwq9iYDkSn8TCDmEJ49ljEUBxDVCzCHv7QNzZOfODanX4+bWQ4WZqLCRWYLfhag==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-from-html": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-raw": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/rehype-raw/-/rehype-raw-7.0.0.tgz", + "integrity": "sha512-/aE8hCfKlQeA8LmyeyQvQF3eBiLRGNlfBJEvWH7ivp9sBqs7TNqBL5X3v157rM4IFETqDnIOO+z5M/biZbo9Ww==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-raw": "^9.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-recma": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/rehype-recma/-/rehype-recma-1.0.0.tgz", + "integrity": 
"sha512-lqA4rGUf1JmacCNWWZx0Wv1dHqMwxzsDWYMTowuplHF3xH0N/MmrZ/G3BDZnzAkRmxDadujCjaKM2hqYdCBOGw==", + "dependencies": { + "@types/estree": "^1.0.0", + "@types/hast": "^3.0.0", + "hast-util-to-estree": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rehype-stringify": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/rehype-stringify/-/rehype-stringify-10.0.1.tgz", + "integrity": "sha512-k9ecfXHmIPuFVI61B9DeLPN0qFHfawM6RsuX48hoqlaKSF61RskNjSm1lI8PhBEM0MRdLxVVm4WmTqJQccH9mA==", + "dependencies": { + "@types/hast": "^3.0.0", + "hast-util-to-html": "^9.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-directive": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/remark-directive/-/remark-directive-3.0.1.tgz", + "integrity": "sha512-gwglrEQEZcZYgVyG1tQuA+h58EZfq5CSULw7J90AFuCTyib1thgHPoqQ+h9iFvU6R+vnZ5oNFQR5QKgGpk741A==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-directive": "^3.0.0", + "micromark-extension-directive": "^3.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-gfm": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-4.0.1.tgz", + "integrity": "sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-gfm": "^3.0.0", + "micromark-extension-gfm": "^3.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-mdx": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-3.1.1.tgz", + "integrity": "sha512-Pjj2IYlUY3+D8x00UJsIOg5BEvfMyeI+2uLPn9VO9Wg4MEtN/VTIq2NEJQfde9PnX15KgtHyl9S0BcTnWrIuWg==", + "dependencies": { + "mdast-util-mdx": "^3.0.0", + "micromark-extension-mdxjs": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-parse": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-11.0.0.tgz", + "integrity": "sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-from-markdown": "^2.0.0", + "micromark-util-types": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-rehype": { + "version": "11.1.2", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.2.tgz", + "integrity": "sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==", + "dependencies": { + "@types/hast": "^3.0.0", + "@types/mdast": "^4.0.0", + "mdast-util-to-hast": "^13.0.0", + "unified": "^11.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/remark-smartypants": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/remark-smartypants/-/remark-smartypants-3.0.2.tgz", + "integrity": 
"sha512-ILTWeOriIluwEvPjv67v7Blgrcx+LZOkAUVtKI3putuhlZm84FnqDORNXPPm+HY3NdZOMhyDwZ1E+eZB/Df5dA==", + "dependencies": { + "retext": "^9.0.0", + "retext-smartypants": "^6.0.0", + "unified": "^11.0.4", + "unist-util-visit": "^5.0.0" + }, + "engines": { + "node": ">=16.0.0" + } + }, + "node_modules/remark-stringify": { + "version": "11.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-11.0.0.tgz", + "integrity": "sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==", + "dependencies": { + "@types/mdast": "^4.0.0", + "mdast-util-to-markdown": "^2.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/retext/-/retext-9.0.0.tgz", + "integrity": "sha512-sbMDcpHCNjvlheSgMfEcVrZko3cDzdbe1x/e7G66dFp0Ff7Mldvi2uv6JkJQzdRcvLYE8CA8Oe8siQx8ZOgTcA==", + "dependencies": { + "@types/nlcst": "^2.0.0", + "retext-latin": "^4.0.0", + "retext-stringify": "^4.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-latin": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/retext-latin/-/retext-latin-4.0.0.tgz", + "integrity": "sha512-hv9woG7Fy0M9IlRQloq/N6atV82NxLGveq+3H2WOi79dtIYWN8OaxogDm77f8YnVXJL2VD3bbqowu5E3EMhBYA==", + "dependencies": { + "@types/nlcst": "^2.0.0", + "parse-latin": "^7.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-smartypants": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/retext-smartypants/-/retext-smartypants-6.2.0.tgz", + "integrity": "sha512-kk0jOU7+zGv//kfjXEBjdIryL1Acl4i9XNkHxtM7Tm5lFiCog576fjNC9hjoR7LTKQ0DsPWy09JummSsH1uqfQ==", + "dependencies": { + "@types/nlcst": "^2.0.0", + "nlcst-to-string": "^4.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/retext-stringify": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/retext-stringify/-/retext-stringify-4.0.0.tgz", + "integrity": "sha512-rtfN/0o8kL1e+78+uxPTqu1Klt0yPzKuQ2BfWwwfgIUSayyzxpM1PJzkKt4V8803uB9qSy32MvI7Xep9khTpiA==", + "dependencies": { + "@types/nlcst": "^2.0.0", + "nlcst-to-string": "^4.0.0", + "unified": "^11.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/rollup": { + "version": "4.60.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.1.tgz", + "integrity": "sha512-VmtB2rFU/GroZ4oL8+ZqXgSA38O6GR8KSIvWmEFv63pQ0G6KaBH9s07PO8XTXP4vI+3UJUEypOfjkGfmSBBR0w==", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.60.1", + "@rollup/rollup-android-arm64": "4.60.1", + "@rollup/rollup-darwin-arm64": "4.60.1", + "@rollup/rollup-darwin-x64": "4.60.1", + "@rollup/rollup-freebsd-arm64": "4.60.1", + "@rollup/rollup-freebsd-x64": "4.60.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.60.1", + "@rollup/rollup-linux-arm-musleabihf": "4.60.1", + "@rollup/rollup-linux-arm64-gnu": "4.60.1", + "@rollup/rollup-linux-arm64-musl": "4.60.1", + "@rollup/rollup-linux-loong64-gnu": "4.60.1", + 
"@rollup/rollup-linux-loong64-musl": "4.60.1", + "@rollup/rollup-linux-ppc64-gnu": "4.60.1", + "@rollup/rollup-linux-ppc64-musl": "4.60.1", + "@rollup/rollup-linux-riscv64-gnu": "4.60.1", + "@rollup/rollup-linux-riscv64-musl": "4.60.1", + "@rollup/rollup-linux-s390x-gnu": "4.60.1", + "@rollup/rollup-linux-x64-gnu": "4.60.1", + "@rollup/rollup-linux-x64-musl": "4.60.1", + "@rollup/rollup-openbsd-x64": "4.60.1", + "@rollup/rollup-openharmony-arm64": "4.60.1", + "@rollup/rollup-win32-arm64-msvc": "4.60.1", + "@rollup/rollup-win32-ia32-msvc": "4.60.1", + "@rollup/rollup-win32-x64-gnu": "4.60.1", + "@rollup/rollup-win32-x64-msvc": "4.60.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/sax": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/sax/-/sax-1.6.0.tgz", + "integrity": "sha512-6R3J5M4AcbtLUdZmRv2SygeVaM7IhrLXu9BmnOGmmACak8fiUtOsYNWUS4uK7upbmHIBbLBeFeI//477BKLBzA==", + "engines": { + "node": ">=11.0.0" + } + }, + "node_modules/scheduler": { + "version": "0.23.2", + "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.2.tgz", + "integrity": "sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==", + "dependencies": { + "loose-envify": "^1.1.0" + } + }, + "node_modules/semver": { + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/sharp": { + "version": "0.33.5", + "resolved": "https://registry.npmjs.org/sharp/-/sharp-0.33.5.tgz", + "integrity": "sha512-haPVm1EkS9pgvHrQ/F3Xy+hgcuMV0Wm9vfIBSiwZ05k+xgb0PkBQpGsAA/oWdDobNaZTH5ppvHtzCFbnSEwHVw==", + "hasInstallScript": true, + "dependencies": { + "color": "^4.2.3", + "detect-libc": "^2.0.3", + "semver": "^7.6.3" + }, + "engines": { + "node": "^18.17.0 || ^20.3.0 || >=21.0.0" + }, + "funding": { + "url": "https://opencollective.com/libvips" + }, + "optionalDependencies": { + "@img/sharp-darwin-arm64": "0.33.5", + "@img/sharp-darwin-x64": "0.33.5", + "@img/sharp-libvips-darwin-arm64": "1.0.4", + "@img/sharp-libvips-darwin-x64": "1.0.4", + "@img/sharp-libvips-linux-arm": "1.0.5", + "@img/sharp-libvips-linux-arm64": "1.0.4", + "@img/sharp-libvips-linux-s390x": "1.0.4", + "@img/sharp-libvips-linux-x64": "1.0.4", + "@img/sharp-libvips-linuxmusl-arm64": "1.0.4", + "@img/sharp-libvips-linuxmusl-x64": "1.0.4", + "@img/sharp-linux-arm": "0.33.5", + "@img/sharp-linux-arm64": "0.33.5", + "@img/sharp-linux-s390x": "0.33.5", + "@img/sharp-linux-x64": "0.33.5", + "@img/sharp-linuxmusl-arm64": "0.33.5", + "@img/sharp-linuxmusl-x64": "0.33.5", + "@img/sharp-wasm32": "0.33.5", + "@img/sharp-win32-ia32": "0.33.5", + "@img/sharp-win32-x64": "0.33.5" + } + }, + "node_modules/sharp/node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shiki": { + "version": "3.23.0", + "resolved": "https://registry.npmjs.org/shiki/-/shiki-3.23.0.tgz", + "integrity": "sha512-55Dj73uq9ZXL5zyeRPzHQsK7Nbyt6Y10k5s7OjuFZGMhpp4r/rsLBH0o/0fstIzX1Lep9VxefWljK/SKCzygIA==", + "dependencies": { + "@shikijs/core": "3.23.0", + "@shikijs/engine-javascript": "3.23.0", + "@shikijs/engine-oniguruma": "3.23.0", + "@shikijs/langs": "3.23.0", + 
"@shikijs/themes": "3.23.0", + "@shikijs/types": "3.23.0", + "@shikijs/vscode-textmate": "^10.0.2", + "@types/hast": "^3.0.4" + } + }, + "node_modules/simple-swizzle": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/simple-swizzle/-/simple-swizzle-0.2.4.tgz", + "integrity": "sha512-nAu1WFPQSMNr2Zn9PGSZK9AGn4t/y97lEm+MXTtUDwfP0ksAIX4nO+6ruD9Jwut4C49SB1Ws+fbXsm/yScWOHw==", + "dependencies": { + "is-arrayish": "^0.3.1" + } + }, + "node_modules/sisteransi": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz", + "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==" + }, + "node_modules/sitemap": { + "version": "8.0.3", + "resolved": "https://registry.npmjs.org/sitemap/-/sitemap-8.0.3.tgz", + "integrity": "sha512-9Ew1tR2WYw8RGE2XLy7GjkusvYXy8Rg6y8TYuBuQMfIEdGcWoJpY2Wr5DzsEiL/TKCw56+YKTCCUHglorEYK+A==", + "dependencies": { + "@types/node": "^17.0.5", + "@types/sax": "^1.2.1", + "arg": "^5.0.0", + "sax": "^1.4.1" + }, + "bin": { + "sitemap": "dist/cli.js" + }, + "engines": { + "node": ">=14.0.0", + "npm": ">=6.0.0" + } + }, + "node_modules/sitemap/node_modules/@types/node": { + "version": "17.0.45", + "resolved": "https://registry.npmjs.org/@types/node/-/node-17.0.45.tgz", + "integrity": "sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw==" + }, + "node_modules/smol-toml": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/smol-toml/-/smol-toml-1.6.1.tgz", + "integrity": "sha512-dWUG8F5sIIARXih1DTaQAX4SsiTXhInKf1buxdY9DIg4ZYPZK5nGM1VRIYmEbDbsHt7USo99xSLFu5Q1IqTmsg==", + "engines": { + "node": ">= 18" + }, + "funding": { + "url": "https://github.com/sponsors/cyyynthia" + } + }, + "node_modules/source-map": { + "version": "0.7.6", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.7.6.tgz", + "integrity": "sha512-i5uvt8C3ikiWeNZSVZNWcfZPItFQOsYTUAOkcUPGd8DqDy1uOUikjt5dG+uRlwyvR108Fb9DOd4GvXfT0N2/uQ==", + "engines": { + "node": ">= 12" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/space-separated-tokens": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-2.0.2.tgz", + "integrity": "sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/stream-replace-string": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/stream-replace-string/-/stream-replace-string-2.0.0.tgz", + "integrity": "sha512-TlnjJ1C0QrmxRNrON00JvaFFlNh5TTG00APw23j74ET7gkQpTASi6/L2fuiav8pzK715HXtUeClpBTw2NPSn6w==" + }, + "node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stringify-entities": { + "version": 
"4.0.4", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-4.0.4.tgz", + "integrity": "sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==", + "dependencies": { + "character-entities-html4": "^2.0.0", + "character-entities-legacy": "^3.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/style-to-js": { + "version": "1.1.21", + "resolved": "https://registry.npmjs.org/style-to-js/-/style-to-js-1.1.21.tgz", + "integrity": "sha512-RjQetxJrrUJLQPHbLku6U/ocGtzyjbJMP9lCNK7Ag0CNh690nSH8woqWH9u16nMjYBAok+i7JO1NP2pOy8IsPQ==", + "dependencies": { + "style-to-object": "1.0.14" + } + }, + "node_modules/style-to-object": { + "version": "1.0.14", + "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-1.0.14.tgz", + "integrity": "sha512-LIN7rULI0jBscWQYaSswptyderlarFkjQ+t79nzty8tcIAceVomEVlLzH5VP4Cmsv6MtKhs7qaAiwlcp+Mgaxw==", + "dependencies": { + "inline-style-parser": "0.2.7" + } + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==" + }, + "node_modules/tinyglobby": { + "version": "0.2.16", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.16.tgz", + "integrity": "sha512-pn99VhoACYR8nFHhxqix+uvsbXineAasWm5ojXoN8xEwK5Kd3/TrhNn1wByuD52UxWRLy8pu+kRMniEi6Eq9Zg==", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/trim-lines": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/trim-lines/-/trim-lines-3.0.1.tgz", + "integrity": "sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/trough": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/trough/-/trough-2.2.0.tgz", + "integrity": "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/tsconfck": { + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/tsconfck/-/tsconfck-3.1.6.tgz", + "integrity": "sha512-ks6Vjr/jEw0P1gmOVwutM3B7fWxoWBL2KRDb1JfqGVawBmO5UsvmWOQFGHBPl5yxYz4eERr19E6L7NMv+Fej4w==", + "bin": { + "tsconfck": "bin/tsconfck.js" + }, + "engines": { + "node": "^18 || >=20" + }, + "peerDependencies": { + "typescript": "^5.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "optional": true + }, + "node_modules/type-fest": { + "version": 
"4.41.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.41.0.tgz", + "integrity": "sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.9.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz", + "integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==", + "peer": true, + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/ufo": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.6.3.tgz", + "integrity": "sha512-yDJTmhydvl5lJzBmy/hyOAA0d+aqCBuwl818haVdYCRrWV84o7YyeVm4QlVHStqNrrJSTb6jKuFAVqAFsr+K3Q==" + }, + "node_modules/ultrahtml": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/ultrahtml/-/ultrahtml-1.6.0.tgz", + "integrity": "sha512-R9fBn90VTJrqqLDwyMph+HGne8eqY1iPfYhPzZrvKpIfwkWZbcYlfpsb8B9dTvBfpy1/hqAD7Wi8EKfP9e8zdw==" + }, + "node_modules/uncrypto": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/uncrypto/-/uncrypto-0.1.3.tgz", + "integrity": "sha512-Ql87qFHB3s/De2ClA9e0gsnS6zXG27SkTiSJwjCc9MebbfapQfuPzumMIUMi38ezPZVNFcHI9sUIepeQfw8J8Q==" + }, + "node_modules/undici-types": { + "version": "7.18.2", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.18.2.tgz", + "integrity": "sha512-AsuCzffGHJybSaRrmr5eHr81mwJU3kjw6M+uprWvCXiNeN9SOGwQ3Jn8jb8m3Z6izVgknn1R0FTCEAP2QrLY/w==" + }, + "node_modules/unified": { + "version": "11.0.5", + "resolved": "https://registry.npmjs.org/unified/-/unified-11.0.5.tgz", + "integrity": "sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==", + "dependencies": { + "@types/unist": "^3.0.0", + "bail": "^2.0.0", + "devlop": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^4.0.0", + "trough": "^2.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-find-after": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-find-after/-/unist-util-find-after-5.0.0.tgz", + "integrity": "sha512-amQa0Ep2m6hE2g72AugUItjbuM8X8cGQnFoHk0pGfrFeT9GZhzN5SW8nRsiGKK7Aif4CrACPENkA6P/Lw6fHGQ==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-is": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-6.0.1.tgz", + "integrity": "sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-modify-children": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-modify-children/-/unist-util-modify-children-4.0.0.tgz", + "integrity": "sha512-+tdN5fGNddvsQdIzUF3Xx82CU9sMM+fA0dLgR9vOmT0oPT2jH+P1nd5lSqfCfXAw+93NhcXNY2qqvTUtE4cQkw==", + "dependencies": { + "@types/unist": "^3.0.0", + "array-iterate": "^2.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position": { 
+ "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-5.0.0.tgz", + "integrity": "sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-position-from-estree": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/unist-util-position-from-estree/-/unist-util-position-from-estree-2.0.0.tgz", + "integrity": "sha512-KaFVRjoqLyF6YXCbVLNad/eS4+OfPQQn2yOd7zF/h5T/CSL2v8NpN6a5TPvtbXthAGw5nG+PuTtq+DdIZr+cRQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-remove-position": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-5.0.0.tgz", + "integrity": "sha512-Hp5Kh3wLxv0PHj9m2yZhhLt58KzPtEYKQQ4yxfYFEO7EvHwzyDYnduhHnY1mDxoqr7VUwVuHXk9RXKIiYS1N8Q==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-visit": "^5.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-stringify-position": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-4.0.0.tgz", + "integrity": "sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-5.1.0.tgz", + "integrity": "sha512-m+vIdyeCOpdr/QeQCu2EzxX/ohgS8KbnPDgFni4dQsfSCtpz8UqDyY5GjRru8PDKuYn7Fq19j1CQ+nJSsGKOzg==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0", + "unist-util-visit-parents": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-children": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unist-util-visit-children/-/unist-util-visit-children-3.0.0.tgz", + "integrity": "sha512-RgmdTfSBOg04sdPcpTSD1jzoNBjt9a80/ZCzp5cI9n1qPzLZWF9YdvWGN2zmTumP1HWhXKdUWexjy/Wy/lJ7tA==", + "dependencies": { + "@types/unist": "^3.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/unist-util-visit-parents": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-6.0.2.tgz", + "integrity": "sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-is": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": 
"tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" + }, + "node_modules/vfile": { + "version": "6.0.3", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-6.0.3.tgz", + "integrity": "sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile-message": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-location": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-5.0.3.tgz", + "integrity": "sha512-5yXvWDEgqeiYiBe1lbxYF7UMAIm/IcopxMHrMQDq3nvKcjPKIhZklUKL+AE7J7uApI4kwe2snsK+eI6UTj9EHg==", + "dependencies": { + "@types/unist": "^3.0.0", + "vfile": "^6.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vfile-message": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-4.0.3.tgz", + "integrity": "sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==", + "dependencies": { + "@types/unist": "^3.0.0", + "unist-util-stringify-position": "^4.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/vite": { + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/vite/-/vite-6.4.2.tgz", + "integrity": "sha512-2N/55r4JDJ4gdrCvGgINMy+HH3iRpNIz8K6SFwVsA+JbQScLiC+clmAxBgwiSPgcG9U15QmvqCGWzMbqda5zGQ==", + "dependencies": { + "esbuild": "^0.25.0", + "fdir": "^6.4.4", + "picomatch": "^4.0.2", + "postcss": "^8.5.3", + "rollup": "^4.34.9", + "tinyglobby": "^0.2.13" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || ^20.0.0 || >=22.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || ^20.0.0 || >=22.0.0", + "jiti": ">=1.21.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.16.0", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "jiti": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/vitefu": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/vitefu/-/vitefu-1.1.3.tgz", + "integrity": "sha512-ub4okH7Z5KLjb6hDyjqrGXqWtWvoYdU3IGm/NorpgHncKoLTCfRIbvlhBm7r0YstIaQRYlp4yEbFqDcKSzXSSg==", + "workspaces": [ + "tests/deps/*", + "tests/projects/*", + 
"tests/projects/workspace/packages/*" + ], + "peerDependencies": { + "vite": "^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "vite": { + "optional": true + } + } + }, + "node_modules/web-namespaces": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-2.0.1.tgz", + "integrity": "sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/which-pm-runs": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/which-pm-runs/-/which-pm-runs-1.1.0.tgz", + "integrity": "sha512-n1brCuqClxfFfq/Rb0ICg9giSZqCS+pLtccdag6C2HyufBrh3fBOiy9nb6ggRMvWOVH5GrdJskj5iGTZNxd7SA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/widest-line": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-5.0.0.tgz", + "integrity": "sha512-c9bZp7b5YtRj2wOe6dlj32MK+Bx/M/d+9VB2SHM1OtsUHR0aV0tdP6DWh/iMt0kWi1t5g1Iudu6hQRNd1A4PVA==", + "dependencies": { + "string-width": "^7.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/wrap-ansi": { + "version": "9.0.2", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", + "integrity": "sha512-42AtmgqjV+X1VpdOfyTGOYRi0/zsoLqtXQckTmqTeybT+BDIbM/Guxo7x3pE2vtpr1ok6xRqM9OpBe+Jyoqyww==", + "dependencies": { + "ansi-styles": "^6.2.1", + "string-width": "^7.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/xxhash-wasm": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/xxhash-wasm/-/xxhash-wasm-1.1.0.tgz", + "integrity": "sha512-147y/6YNh+tlp6nd/2pWq38i9h6mz/EuQ6njIrmW8D1BS5nCqs0P6DG+m6zTGnNz5I+uhZ0SHxBs9BsPrwcKDA==" + }, + "node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==" + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yocto-queue": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.2.2.tgz", + "integrity": "sha512-4LCcse/U2MHZ63HAJVE+v71o7yOdIe4cZ70Wpf8D/IyjDKYQLV5GD46B+hSTjJsvV5PztjvHoU580EftxjDZFQ==", + "engines": { + "node": ">=12.20" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yocto-spinner": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/yocto-spinner/-/yocto-spinner-0.2.3.tgz", + "integrity": "sha512-sqBChb33loEnkoXte1bLg45bEBsOP9N1kzQh5JZNKj/0rik4zAPTNSAVPj3uQAdc6slYJ0Ksc403G2XgxsJQFQ==", + "dependencies": { + "yoctocolors": "^2.1.1" + }, + "engines": { + "node": ">=18.19" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yoctocolors": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/yoctocolors/-/yoctocolors-2.1.2.tgz", + "integrity": "sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==", + 
"engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/zod": { + "version": "3.25.76", + "resolved": "https://registry.npmjs.org/zod/-/zod-3.25.76.tgz", + "integrity": "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==", + "funding": { + "url": "https://github.com/sponsors/colinhacks" + } + }, + "node_modules/zod-to-json-schema": { + "version": "3.25.2", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.25.2.tgz", + "integrity": "sha512-O/PgfnpT1xKSDeQYSCfRI5Gy3hPf91mKVDuYLUHZJMiDFptvP41MSnWofm8dnCm0256ZNfZIM7DSzuSMAFnjHA==", + "peerDependencies": { + "zod": "^3.25.28 || ^4" + } + }, + "node_modules/zod-to-ts": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/zod-to-ts/-/zod-to-ts-1.2.0.tgz", + "integrity": "sha512-x30XE43V+InwGpvTySRNz9kB7qFU8DlyEy7BsSTCHPH1R0QasMmHWZDCzYm6bVXtj/9NNJAZF3jW8rzFvH5OFA==", + "peerDependencies": { + "typescript": "^4.9.4 || ^5.0.2", + "zod": "^3" + } + }, + "node_modules/zwitch": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz", + "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + } + } +} diff --git a/docs-site/package.json b/docs-site/package.json new file mode 100644 index 00000000..574d2844 --- /dev/null +++ b/docs-site/package.json @@ -0,0 +1,24 @@ +{ + "name": "graphrag-toolkit-docs", + "type": "module", + "version": "0.0.1", + "private": true, + "scripts": { + "dev": "astro dev", + "build": "astro build", + "preview": "astro preview" + }, + "dependencies": { + "@astrojs/react": "^4.2.0", + "@astrojs/starlight": "0.32.5", + "@types/react": "^18.3.28", + "@types/react-dom": "^18.3.7", + "astro": "5.6.2", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "sharp": "^0.33.5" + }, + "overrides": { + "@astrojs/sitemap": "3.2.1" + } +} diff --git a/docs-site/public/.nojekyll b/docs-site/public/.nojekyll new file mode 100644 index 00000000..e69de29b diff --git a/docs-site/src/assets/logo.svg b/docs-site/src/assets/logo.svg new file mode 100644 index 00000000..38144e03 --- /dev/null +++ b/docs-site/src/assets/logo.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/docs-site/src/components/StoreUrlBuilder.tsx b/docs-site/src/components/StoreUrlBuilder.tsx new file mode 100644 index 00000000..a863e82f --- /dev/null +++ b/docs-site/src/components/StoreUrlBuilder.tsx @@ -0,0 +1,159 @@ +import { useState } from 'react'; + +type Backend = + | { kind: 'neptune-db'; cluster: string; region: string } + | { kind: 'neptune-graph'; graphId: string } + | { kind: 'neo4j'; host: string; port: string } + | { kind: 'falkordb'; host: string; port: string }; + +const backends = [ + { id: 'neptune-db', label: 'Neptune DB' }, + { id: 'neptune-graph', label: 'Neptune Analytics' }, + { id: 'neo4j', label: 'Neo4j' }, + { id: 'falkordb', label: 'FalkorDB' }, +] as const; + +function buildUrl(b: Backend): string { + switch (b.kind) { + case 'neptune-db': + return `neptune-db://${b.cluster}.cluster-xxxxxxxx.${b.region}.neptune.amazonaws.com`; + case 'neptune-graph': + return `neptune-graph://${b.graphId}`; + case 'neo4j': + return `neo4j://${b.host}:${b.port}`; + case 'falkordb': + return `falkordb://${b.host}:${b.port}`; + } +} + +const inputStyle: React.CSSProperties = { + padding: '0.5rem 0.75rem', + 
background: 'var(--sl-color-bg-inline-code)', + color: 'var(--sl-color-white)', + border: '1px solid var(--sl-color-gray-5)', + borderRadius: '6px', + fontSize: '0.9rem', + fontFamily: 'var(--sl-font-system-mono)', + width: '100%', +}; + +const labelStyle: React.CSSProperties = { + display: 'block', + fontSize: '0.8rem', + color: 'var(--sl-color-gray-2)', + marginBottom: '0.25rem', + textTransform: 'uppercase', + letterSpacing: '0.05em', +}; + +export default function StoreUrlBuilder() { + const [kind, setKind] = useState('neptune-db'); + const [cluster, setCluster] = useState('my-graph'); + const [region, setRegion] = useState('us-east-1'); + const [graphId, setGraphId] = useState('g-abc123def456'); + const [host, setHost] = useState('localhost'); + const [port, setPort] = useState('7687'); + + let backend: Backend; + if (kind === 'neptune-db') backend = { kind, cluster, region }; + else if (kind === 'neptune-graph') backend = { kind, graphId }; + else if (kind === 'neo4j') backend = { kind, host, port }; + else backend = { kind, host, port }; + + const url = buildUrl(backend); + + return ( +
+    <div style={{ display: 'grid', gap: '1rem', margin: '1rem 0' }}>
+      <div style={{ display: 'flex', gap: '0.5rem', flexWrap: 'wrap' }}>
+        {backends.map((b) => (
+          <button
+            key={b.id}
+            onClick={() => setKind(b.id)}
+            style={{
+              padding: '0.4rem 0.8rem',
+              borderRadius: '6px',
+              cursor: 'pointer',
+              fontWeight: kind === b.id ? 700 : 400,
+            }}
+          >
+            {b.label}
+          </button>
+        ))}
+      </div>
+
+      {kind === 'neptune-db' && (
+        <>
+          <div>
+            <label style={labelStyle}>Cluster name</label>
+            <input style={inputStyle} value={cluster} onChange={(e) => setCluster(e.target.value)} />
+          </div>
+          <div>
+            <label style={labelStyle}>Region</label>
+            <input style={inputStyle} value={region} onChange={(e) => setRegion(e.target.value)} />
+          </div>
+        </>
+      )}
+      {kind === 'neptune-graph' && (
+        <div>
+          <label style={labelStyle}>Graph identifier</label>
+          <input style={inputStyle} value={graphId} onChange={(e) => setGraphId(e.target.value)} />
+        </div>
+      )}
+      {(kind === 'neo4j' || kind === 'falkordb') && (
+        <>
+          <div>
+            <label style={labelStyle}>Host</label>
+            <input style={inputStyle} value={host} onChange={(e) => setHost(e.target.value)} />
+          </div>
+          <div>
+            <label style={labelStyle}>Port</label>
+            <input style={inputStyle} value={port} onChange={(e) => setPort(e.target.value)} />
+          </div>
+        </>
+      )}
+
+      <div>
+        <label style={labelStyle}>Connection string</label>
+        <code style={{ ...inputStyle, display: 'block' }}>{url}</code>
+      </div>
+    </div>
+ ); +} diff --git a/docs-site/src/content.config.ts b/docs-site/src/content.config.ts new file mode 100644 index 00000000..6a7b7a02 --- /dev/null +++ b/docs-site/src/content.config.ts @@ -0,0 +1,7 @@ +import { defineCollection } from 'astro:content'; +import { docsLoader } from '@astrojs/starlight/loaders'; +import { docsSchema } from '@astrojs/starlight/schema'; + +export const collections = { + docs: defineCollection({ loader: docsLoader(), schema: docsSchema() }), +}; diff --git a/docs-site/src/content/docs/404.mdx b/docs-site/src/content/docs/404.mdx new file mode 100644 index 00000000..fd08f7e0 --- /dev/null +++ b/docs-site/src/content/docs/404.mdx @@ -0,0 +1,7 @@ +--- +title: 'Page not found' +template: splash +editUrl: false +--- + +That page does not exist. diff --git a/docs-site/src/content/docs/byokg-rag/configuration.mdx b/docs-site/src/content/docs/byokg-rag/configuration.mdx new file mode 100644 index 00000000..1aa4a13c --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/configuration.mdx @@ -0,0 +1,264 @@ +--- +title: Configuration +--- + +## Overview + +The byokg-rag library provides extensive configuration options to customize query processing, retrieval strategies, and LLM (Large Language Model) behavior. Configuration occurs at multiple levels: query engine initialization, retriever setup, entity linking, and LLM parameters. + +This document provides complete parameter documentation for all configurable components. Most components provide sensible defaults, allowing you to start with minimal configuration and adjust as needed for your specific use case. + +## Query Engine Configuration + +### ByoKGQueryEngine + +The query engine orchestrates the entire KGQA (Knowledge Graph Question Answering) pipeline, coordinating entity linking, retrieval, and answer generation. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `graph_store` | GraphStore | Required | Graph store instance providing access to knowledge graph data | `NeptuneAnalyticsGraphStore(...)` | +| `entity_linker` | EntityLinker | Auto-created | Component for linking text mentions to graph entities | `EntityLinker(...)` | +| `triplet_retriever` | GRetriever | Auto-created | Retriever for extracting relevant triplets from the graph | `AgenticRetriever(...)` | +| `path_retriever` | PathRetriever | Auto-created | Retriever for finding paths between entities | `PathRetriever(...)` | +| `graph_query_executor` | GraphQueryRetriever | Auto-created | Executor for running structured graph queries | `GraphQueryRetriever(...)` | +| `llm_generator` | BaseGenerator | Auto-created | Language model for generating responses | `BedrockGenerator(...)` | +| `kg_linker` | KGLinker | Auto-created | Linker for multi-strategy retrieval operations | `KGLinker(...)` | +| `cypher_kg_linker` | CypherKGLinker | None | Specialized linker for Cypher-based retrieval | `CypherKGLinker(...)` | +| `direct_query_linking` | bool | False | Enable direct entity linking using query embeddings | `True` | + +NOTE: When parameters are not provided, the query engine creates default instances with standard configurations. You can override any component to customize behavior. + +#### Query Method Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `query` | str | Required | The natural language question to answer | "Who won the Nobel Prize in Physics in 1921?" 
| +| `iterations` | int | 2 | Number of multi-strategy retrieval iterations | 3 | +| `cypher_iterations` | int | 2 | Number of Cypher query generation attempts | 3 | +| `user_input` | str | "" | Additional instructions or context for the LLM | "Focus on recent discoveries" | + +**Valid Ranges:** + +- `iterations`: 1-10 (higher values increase retrieval coverage but also latency) +- `cypher_iterations`: 1-5 (higher values allow more query refinement attempts) + +## Retriever Configuration + +### AgenticRetriever + +The agentic retriever implements iterative, LLM-guided exploration of the knowledge graph. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `llm_generator` | BaseGenerator | Required | Language model for guiding exploration | `BedrockGenerator(...)` | +| `graph_traversal` | GTraversal | Required | Component for traversing graph structure | `GTraversal(graph_store)` | +| `graph_verbalizer` | TripletGVerbalizer | Required | Component for converting triplets to text | `TripletGVerbalizer()` | +| `pruning_reranker` | Reranker | None | Optional reranker for pruning results | `BGEReranker()` | +| `max_num_relations` | int | 5 | Maximum relations to consider per iteration | 10 | +| `max_num_entities` | int | 3 | Maximum entities to explore per iteration | 5 | +| `max_num_iterations` | int | 3 | Maximum exploration iterations | 5 | +| `max_num_triplets` | int | 50 | Maximum triplets to retain after pruning | 100 | + +**Parameter Guidelines:** + +- Increase `max_num_relations` for broader exploration of relationship types +- Increase `max_num_entities` to explore more entity neighborhoods +- Increase `max_num_iterations` for complex multi-hop reasoning +- Increase `max_num_triplets` to retain more context (at the cost of LLM input length) + +### PathRetriever + +The path retriever finds structured paths between entities following metapath patterns. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `graph_traversal` | GTraversal | Required | Component for traversing graph structure | `GTraversal(graph_store)` | +| `path_verbalizer` | PathVerbalizer | Required | Component for converting paths to text | `PathVerbalizer()` | + +The path retriever has minimal configuration. Its behavior is primarily controlled by the metapaths provided during retrieval. + +### GraphQueryRetriever + +The graph query retriever executes structured queries (openCypher) against the graph store. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `graph_store` | GraphStore | Required | Graph store instance for query execution | `NeptuneAnalyticsGraphStore(...)` | +| `block_graph_modification` | bool | True | Block queries that modify the graph | `True` | + +WARNING: Setting `block_graph_modification` to False allows DELETE, CREATE, and other modification operations. Only disable this in controlled environments where query safety is guaranteed. + +## Entity Linker Configuration + +### KGLinker + +The KG linker coordinates LLM-based entity extraction and linking for multi-strategy retrieval. 
+ +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `llm_generator` | BaseGenerator | Required | Language model for entity extraction | `BedrockGenerator(...)` | +| `graph_store` | GraphStore | Required | Graph store for schema and entity information | `NeptuneAnalyticsGraphStore(...)` | +| `max_input_tokens` | int | 32000 | Maximum tokens allowed in user input and question | 16000 | + +The `max_input_tokens` parameter prevents excessively long inputs that could cause LLM errors or high costs. + +### CypherKGLinker + +The Cypher KG linker specializes in generating and executing openCypher queries. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `llm_generator` | BaseGenerator | Required | Language model for Cypher generation | `BedrockGenerator(...)` | +| `graph_store` | GraphStore | Required | Graph store supporting openCypher execution | `NeptuneAnalyticsGraphStore(...)` | +| `max_input_tokens` | int | 32000 | Maximum tokens allowed in user input and question | 16000 | + +NOTE: The graph store must support openCypher query execution. Use Neptune Analytics or Neptune Database graph stores. + +## LLM Configuration + +### BedrockGenerator + +The Bedrock generator provides access to foundation models through Amazon Bedrock. + +#### Constructor Parameters + +| Parameter | Type | Default | Description | Example | +|-----------|------|---------|-------------|---------| +| `model_name` | str | "anthropic.claude-3-7-sonnet-20250219-v1:0" | Bedrock model identifier | "anthropic.claude-3-5-sonnet-20240620-v1:0" | +| `region_name` | str | "us-west-2" | AWS region for Bedrock service | "us-east-1" | +| `max_tokens` | int | 4096 | Maximum tokens to generate in responses | 8192 | +| `max_retries` | int | 10 | Maximum retry attempts for failed requests | 5 | +| `prefill` | bool | False | Enable response prefilling (advanced) | False | +| `inference_config` | dict | None | Custom inference configuration | `{"temperature": 0.7}` | +| `reasoning_config` | dict | None | Reasoning configuration for supported models | None | + +**Supported Models:** + +- Claude 3.5 Sonnet: `anthropic.claude-3-5-sonnet-20240620-v1:0` +- Claude 3.7 Sonnet: `anthropic.claude-3-7-sonnet-20250219-v1:0` +- Claude 3 Opus: `anthropic.claude-3-opus-20240229-v1:0` +- Claude 3 Haiku: `anthropic.claude-3-haiku-20240307-v1:0` + +TIP: Claude 3.5 Sonnet provides the best balance of performance and cost for most KGQA applications. Use Claude 3.7 Sonnet for the latest capabilities. 
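+
+As a minimal sketch (the model ID, region, and limits below are illustrative values taken from the tables above; substitute whatever is enabled in your account), a generator can be instantiated like this:
+
+```python
+from graphrag_toolkit.byokg_rag.llm import BedrockGenerator
+
+# Example values only: pick a model and region available to your account.
+llm_generator = BedrockGenerator(
+    model_name="anthropic.claude-3-5-sonnet-20240620-v1:0",
+    region_name="us-east-1",
+    max_tokens=4096,
+    max_retries=10
+)
+```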
+ +**Inference Configuration:** + +The `inference_config` parameter accepts a dictionary with Bedrock inference parameters: + +```python +inference_config = { + "temperature": 0.7, # Controls randomness (0.0-1.0) + "topP": 0.9, # Nucleus sampling threshold + "maxTokens": 4096 # Maximum tokens to generate +} +``` + +## Complete Configuration Example + +This example shows a fully configured query engine with custom components: + +```python +from graphrag_toolkit.byokg_rag.graphstore import NeptuneAnalyticsGraphStore +from graphrag_toolkit.byokg_rag.llm import BedrockGenerator +from graphrag_toolkit.byokg_rag.graph_connectors import KGLinker +from graphrag_toolkit.byokg_rag.graph_retrievers import ( + AgenticRetriever, + PathRetriever, + GraphQueryRetriever, + EntityLinker, + GTraversal, + TripletGVerbalizer, + PathVerbalizer +) +from graphrag_toolkit.byokg_rag.indexing import FuzzyStringIndex +from graphrag_toolkit.byokg_rag.byokg_query_engine import ByoKGQueryEngine + +# Step 1: Set up graph store +graph_store = NeptuneAnalyticsGraphStore( + graph_identifier="", + region="" +) + +# Step 2: Set up LLM +llm_generator = BedrockGenerator( + model_name="anthropic.claude-3-5-sonnet-20240620-v1:0", + region_name="", + max_tokens=4096, + max_retries=10 +) + +# Step 3: Set up entity linking +fuzzy_index = FuzzyStringIndex() +fuzzy_index.add(graph_store.nodes()) +entity_matcher = fuzzy_index.as_entity_matcher() +entity_linker = EntityLinker(entity_matcher) + +# Step 4: Set up retrievers +graph_traversal = GTraversal(graph_store) +triplet_verbalizer = TripletGVerbalizer() +path_verbalizer = PathVerbalizer() + +triplet_retriever = AgenticRetriever( + llm_generator=llm_generator, + graph_traversal=graph_traversal, + graph_verbalizer=triplet_verbalizer, + max_num_relations=5, + max_num_entities=3, + max_num_iterations=3, + max_num_triplets=50 +) + +path_retriever = PathRetriever( + graph_traversal=graph_traversal, + path_verbalizer=path_verbalizer +) + +graph_query_executor = GraphQueryRetriever( + graph_store=graph_store, + block_graph_modification=True +) + +# Step 5: Set up KG linker +kg_linker = KGLinker( + llm_generator=llm_generator, + graph_store=graph_store, + max_input_tokens=32000 +) + +# Step 6: Create query engine +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + entity_linker=entity_linker, + triplet_retriever=triplet_retriever, + path_retriever=path_retriever, + graph_query_executor=graph_query_executor, + llm_generator=llm_generator, + kg_linker=kg_linker, + direct_query_linking=False +) + +# Step 7: Execute query +context = query_engine.query( + query="Who won the Nobel Prize in Physics in 1921?", + iterations=2, + user_input="" +) + +print("Retrieved context:") +for item in context: + print(f" - {item}") +``` + +This example demonstrates explicit configuration of all components. In practice, you can rely on defaults for most parameters and only customize what you need. diff --git a/docs-site/src/content/docs/byokg-rag/faq.mdx b/docs-site/src/content/docs/byokg-rag/faq.mdx new file mode 100644 index 00000000..32452d3e --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/faq.mdx @@ -0,0 +1,399 @@ +--- +title: FAQ +--- + +## Overview + +This document answers common questions about the byokg-rag library and provides guidance on troubleshooting, optimization, and best practices. + +## Common Questions + +### Which graph store should I choose? 
+ +Choose your graph store based on deployment requirements and scale: + +**Amazon Neptune Analytics** is best for: +- Production workloads requiring fast analytical queries +- Applications needing native vector search for entity linking +- Serverless deployments without infrastructure management +- Integration with AWS analytics services + +**Amazon Neptune Database** is best for: +- Transactional workloads requiring ACID guarantees +- Applications needing high availability with automatic failover +- Workloads requiring read replicas for scaling +- Mixed transactional and analytical queries + +**Local Graph Store** is best for: +- Development and prototyping +- Testing with small datasets (< 10,000 nodes) +- Learning and experimentation +- Environments without AWS access + +TIP: Start with the local graph store for development, then migrate to Neptune Analytics for production deployments. + +### How do I optimize query performance? + +Optimize performance through these strategies: + +**1. Adjust iteration counts** + +Reduce `iterations` and `cypher_iterations` parameters to minimize LLM calls: + +```python +context = query_engine.query( + query="Your question", + iterations=1, # Reduce from default of 2 + cypher_iterations=1 +) +``` + +**2. Limit retriever parameters** + +Reduce the scope of graph exploration: + +```python +triplet_retriever = AgenticRetriever( + llm_generator=llm_generator, + graph_traversal=graph_traversal, + graph_verbalizer=triplet_verbalizer, + max_num_relations=3, # Reduce from default of 5 + max_num_entities=2, # Reduce from default of 3 + max_num_iterations=2, # Reduce from default of 3 + max_num_triplets=30 # Reduce from default of 50 +) +``` + +**3. Use appropriate indexes** + +Choose the fastest index type for your use case: +- Fuzzy string index: Fastest, no external dependencies +- Dense index: Slower but better semantic matching +- Graph-store index: Integrated with Neptune Analytics + +**4. Enable direct query linking** + +Skip LLM-based entity extraction for simple queries: + +```python +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + direct_query_linking=True # Use semantic similarity directly +) +``` + +**5. Optimize LLM configuration** + +Use faster models or reduce token limits: + +```python +llm_generator = BedrockGenerator( + model_name="anthropic.claude-3-haiku-20240307-v1:0", # Faster model + max_tokens=2048 # Reduce from default of 4096 +) +``` + +### What LLM models are supported? + +The byokg-rag library supports Amazon Bedrock models through the `BedrockGenerator` class. Recommended models: + +**Claude 3.5 Sonnet** (Recommended) +- Model ID: `anthropic.claude-3-5-sonnet-20240620-v1:0` +- Best balance of performance and cost +- Strong reasoning capabilities for KGQA + +**Claude 3.7 Sonnet** (Latest) +- Model ID: `anthropic.claude-3-7-sonnet-20250219-v1:0` +- Latest capabilities and improvements +- Higher cost than 3.5 Sonnet + +**Claude 3 Opus** +- Model ID: `anthropic.claude-3-opus-20240229-v1:0` +- Highest capability for complex reasoning +- Highest cost and latency + +**Claude 3 Haiku** +- Model ID: `anthropic.claude-3-haiku-20240307-v1:0` +- Fastest and lowest cost +- Suitable for simple queries + +To use a different model: + +```python +llm_generator = BedrockGenerator( + model_name="anthropic.claude-3-haiku-20240307-v1:0", + region_name="" +) +``` + +NOTE: Ensure the model is available in your AWS region. 
Check the [Bedrock model availability](https://docs.aws.amazon.com/bedrock/latest/userguide/models-regions.html) documentation. + +### How do I handle authentication errors? + +Authentication errors typically indicate IAM permission issues. Follow these steps: + +**1. Verify AWS credentials** + +Ensure your environment has valid AWS credentials: + +```bash +aws sts get-caller-identity +``` + +**2. Check IAM permissions** + +Verify your IAM role or user has the required permissions: + +- `bedrock:InvokeModel` for LLM access +- `neptune-graph:ReadDataViaQuery` for Neptune Analytics +- `neptune-db:ReadDataViaQuery` for Neptune Database +- `s3:GetObject` and `s3:PutObject` for S3 operations + +**3. Verify resource access** + +Ensure your credentials can access the specific resources: + +```python +import boto3 + +# Test Neptune Analytics access +client = boto3.client('neptune-graph', region_name='') +response = client.get_graph(graphIdentifier='') +print(response) + +# Test Bedrock access +client = boto3.client('bedrock-runtime', region_name='') +# This will fail if you don't have access +``` + +**4. Check network connectivity** + +For Neptune Database, ensure your application runs in the correct VPC with appropriate security groups. + +### Can I use byokg-rag with my existing knowledge graph? + +Yes, byokg-rag works with existing knowledge graphs. Requirements: + +**Graph Structure** +- Property graph model (nodes and edges with properties) +- Compatible with openCypher query language (for Neptune) + +**Data Loading** + +For Neptune Analytics: +```python +graph_store = NeptuneAnalyticsGraphStore( + graph_identifier="", + region="" +) +``` + +For Neptune Database: +```python +graph_store = NeptuneDBGraphStore( + endpoint_url="https://:8182", + region="" +) +``` + +For local development: +```python +graph_store = LocalKGStore() +graph_store.read_from_csv( + nodes_file="your_nodes.csv", + edges_file="your_edges.csv" +) +``` + +**Schema Requirements** + +The graph store must provide schema information. Neptune Analytics and Neptune Database automatically expose schema. For custom graph stores, implement the `get_schema()` method. + +### How many iterations should I configure? + +The optimal iteration count depends on query complexity: + +**Simple queries (1 iteration)** +- Direct fact lookup: "What is the capital of France?" +- Single-hop relationships: "Who directed Inception?" + +**Moderate queries (2 iterations, default)** +- Two-hop reasoning: "What movies did the director of Inception also direct?" +- Multiple entity queries: "Which actors appeared in both Inception and Interstellar?" + +**Complex queries (3-5 iterations)** +- Multi-hop reasoning: "What awards did actors from Christopher Nolan films win?" +- Aggregation queries: "How many Nobel Prize winners worked at the same institution?" + +**Trade-offs:** +- More iterations: Better coverage, higher latency, higher cost +- Fewer iterations: Faster responses, lower cost, may miss relevant information + +Start with the default (2 iterations) and adjust based on your query complexity and performance requirements. + +### What's the difference between KGLinker and CypherKGLinker? 
+ +**KGLinker** (Multi-Strategy Retrieval) +- Uses multiple retrieval strategies: agentic, path-based, query-based +- Extracts entities from natural language using LLM +- Combines results from different retrieval approaches +- Best for: Complex queries requiring diverse retrieval strategies + +**CypherKGLinker** (Cypher-Focused Retrieval) +- Specializes in generating and executing openCypher queries +- Iteratively refines queries based on results +- Focuses on structured query generation +- Best for: Queries that map well to graph patterns + +**Usage:** + +Multi-strategy retrieval: +```python +kg_linker = KGLinker( + llm_generator=llm_generator, + graph_store=graph_store +) + +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + kg_linker=kg_linker +) +``` + +Cypher-focused retrieval: +```python +cypher_linker = CypherKGLinker( + llm_generator=llm_generator, + graph_store=graph_store +) + +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + cypher_kg_linker=cypher_linker +) +``` + +Combined approach: +```python +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + kg_linker=kg_linker, + cypher_kg_linker=cypher_linker # Tries Cypher first, falls back to multi-strategy +) +``` + +## Known Limitations + +### Retrieval Strategy Limitations + +**Agentic Retrieval** +- Requires multiple LLM calls, increasing latency and cost +- May explore irrelevant paths in very large graphs +- Performance depends on LLM reasoning capabilities + +**Scoring-Based Retrieval** +- Requires semantic similarity computation for all candidate triplets +- May be slow for graphs with high-degree nodes (many edges per node) +- Effectiveness depends on embedding quality + +**Path-Based Retrieval** +- Requires explicit metapath specification from LLM +- May miss relevant paths not matching specified patterns +- Performance degrades with very long paths (> 5 hops) + +**Query-Based Retrieval** +- Requires LLM to generate syntactically correct queries +- May fail on complex graph schemas with many node/edge types +- Query generation quality varies by LLM model + +### Graph Store Limitations + +**Neptune Analytics** +- Vector search requires embeddings to be loaded as node properties +- Very complex queries may timeout (default: 60 seconds) +- Regional availability varies (check AWS documentation) +- Bulk loading requires S3 staging + +**Neptune Database** +- VPC-only access (no public endpoints) +- Schema refresh requires recreating graph store instance +- Concurrent query limits depend on instance size +- Read replicas needed for high query concurrency + +**Local Graph Store** +- In-memory only, limited by available RAM +- No persistence across restarts +- No support for complex query languages +- Single-process access only + +### Performance Considerations + +**Large Graphs (> 1M nodes)** +- Entity linking may be slow without proper indexing +- Consider using graph-store indexes for Neptune Analytics +- Limit exploration depth to avoid excessive traversal + +**High Query Volume** +- LLM rate limits may throttle requests +- Consider caching frequently asked questions +- Use read replicas for Neptune Database + +**Long-Running Queries** +- Queries with many iterations may timeout +- Reduce iteration counts or exploration parameters +- Consider breaking complex queries into simpler sub-queries + +## Troubleshooting + +### Query returns empty results + +**Possible causes:** +1. Entity linking failed to find relevant entities +2. Graph schema doesn't match query expectations +3. 
Insufficient iterations for multi-hop reasoning + +**Solutions:** +- Enable debug logging to see entity linking results +- Verify graph schema matches your domain +- Increase iteration count for complex queries +- Try direct query linking: `direct_query_linking=True` + +### LLM timeout errors + +**Possible causes:** +1. Input exceeds token limits +2. Network connectivity issues +3. Bedrock service throttling + +**Solutions:** +- Reduce `max_input_tokens` parameter +- Reduce graph context size by limiting retrievers +- Implement exponential backoff retry logic +- Check AWS service health dashboard + +### High latency + +**Possible causes:** +1. Too many LLM calls (high iteration counts) +2. Large graph traversals +3. Slow entity linking + +**Solutions:** +- Reduce iteration counts +- Limit retriever parameters (max_num_relations, max_num_entities) +- Use faster index types (fuzzy string vs. dense) +- Use faster LLM models (Claude Haiku) + +### Memory errors with local graph store + +**Possible causes:** +1. Graph too large for available RAM +2. Too many triplets retained in context + +**Solutions:** +- Use Neptune Analytics or Neptune Database instead +- Reduce `max_num_triplets` parameter +- Filter graph data to relevant subset +- Increase available memory + +For additional support, refer to the [example notebooks](../../examples/byokg-rag/) or consult the AWS documentation for Neptune and Bedrock services. diff --git a/docs-site/src/content/docs/byokg-rag/graph-retrievers.mdx b/docs-site/src/content/docs/byokg-rag/graph-retrievers.mdx new file mode 100644 index 00000000..7c9a93a6 --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/graph-retrievers.mdx @@ -0,0 +1,246 @@ +--- +title: Graph Retrievers +--- + +Graph retrievers implement the various retrieval strategies used by BYOKG-RAG to find relevant information from knowledge graphs. Each retriever specializes in a different approach to graph exploration and information extraction. + +### Topics + + - [Overview](#overview) + - [Entity Linker](#entity-linker) + - [Agentic Retriever](#agentic-retriever) + - [Graph Scoring Retriever](#graph-scoring-retriever) + - [Path Retriever](#path-retriever) + - [Graph Query Retriever](#graph-query-retriever) + - [Usage examples](#usage-examples) + +### Overview + +The graph retrievers component provides multiple strategies for retrieving relevant information from knowledge graphs. Each retriever implements the abstract `GRetriever` interface and specializes in different aspects of graph exploration: + +- **Entity Linker** - Links query entities to graph nodes using various matching strategies +- **Agentic Retriever** - Uses LLM-guided iterative exploration for dynamic graph traversal +- **Graph Scoring Retriever** - Applies scoring and ranking to multi-hop graph traversal +- **Path Retriever** - Specializes in finding and verbalizing paths between entities +- **Graph Query Retriever** - Executes structured graph queries and verbalizes results + +### Entity Linker + +The Entity Linker performs two-step linking to connect natural language entities with graph nodes. 
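+
+As a quick orientation, the sketch below shows those two steps in isolation. It assumes an `entity_linker` configured as described in the following sections, and the mention strings are purely illustrative:
+
+```python
+# Step 1 (illustrative): entity mentions extracted from the question,
+# for example by the KG linker's LLM pass
+mentions = [["aspirin", "headache"]]
+
+# Step 2: match each mention against graph nodes and return the linked node IDs
+linked_ids = entity_linker.link(mentions, topk=1, return_dict=False)
+```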
+ +#### Architecture + +The `EntityLinker` class extends the abstract `Linker` base class and provides entity matching capabilities: + +```python +from graphrag_toolkit.byokg_rag.graph_retrievers import EntityLinker + +entity_linker = EntityLinker( + retriever=your_entity_matcher, + topk=3 +) +``` + +#### Key methods + +**`link(query_extracted_entities, retriever, topk, id_selector, return_dict)`** + +Links extracted entities to graph nodes using the configured retriever. + +Parameters: +- `query_extracted_entities` (List[str]): List of entity lists to perform linking on +- `retriever` (object, optional): Entity retriever to use for lookup +- `topk` (int, optional): Number of items to return per entity +- `id_selector` (list, optional): Allowlist of entity IDs to consider +- `return_dict` (bool): Whether to return detailed results or just entity IDs + +Returns: +- If `return_dict=True`: List of dictionaries with detailed linking results +- If `return_dict=False`: List of matched entity IDs + +### Agentic Retriever + +The Agentic Retriever implements an iterative exploration strategy using LLM-guided decision making to dynamically explore the knowledge graph. + +#### Architecture + +```python +from graphrag_toolkit.byokg_rag.graph_retrievers import AgenticRetriever + +agentic_retriever = AgenticRetriever( + llm_generator=your_llm, + graph_traversal=your_traversal_component, + graph_verbalizer=your_verbalizer, + pruning_reranker=your_reranker, + max_num_relations=5, + max_num_entities=3, + max_num_iterations=3, + max_num_triplets=50 +) +``` + +#### Retrieval process + +1. **Start with source nodes** - Begin exploration from provided starting points +2. **Iterative exploration** - Use LLM to select relevant relations and entities +3. **Pruning and reranking** - Apply scoring to filter and rank results +4. **Context building** - Accumulate verbalized triplets as context +5. **Early termination** - Stop when LLM determines sufficient information is found + +#### Key methods + +**`retrieve(query, source_nodes, history_context)`** + +Performs iterative graph exploration guided by LLM decisions. + +**`relation_search_prune(query, entities, max_num_relations)`** + +Searches and prunes relations based on relevance to the query. + +### Graph Scoring Retriever + +The Graph Scoring Retriever uses multi-hop traversal combined with scoring and reranking to efficiently retrieve relevant information. + +#### Architecture + +```python +from graphrag_toolkit.byokg_rag.graph_retrievers import GraphScoringRetriever + +scoring_retriever = GraphScoringRetriever( + graph_traversal=your_traversal_component, + graph_verbalizer=your_verbalizer, + graph_reranker=your_reranker, + pruning_reranker=your_pruning_reranker +) +``` + +#### Key methods + +**`retrieve(query, source_nodes, hops, topk, max_num_relations, max_num_triplets)`** + +Retrieves information using multi-hop traversal with pruning and reranking. + +Parameters: +- `query` (str): The search query +- `source_nodes` (list): Starting nodes for traversal +- `hops` (int): Number of hops to traverse (default: 2) +- `topk` (int): Maximum results to return +- `max_num_relations` (int): Maximum relations after pruning +- `max_num_triplets` (int): Maximum triplets after pruning + +### Path Retriever + +The Path Retriever specializes in finding and verbalizing paths in the knowledge graph, supporting both metapath-based traversal and shortest path finding. 
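+
+A metapath is simply an ordered list of relation labels to follow from each source node. The sketch below illustrates the idea; the relation names are invented, and `path_retriever` is assumed to be configured as in the next section:
+
+```python
+# Hypothetical metapath: follow a "treats" edge, then a "has_side_effect" edge
+metapaths = [["treats", "has_side_effect"]]
+
+# Follow the metapath from a starting node; the retriever returns the discovered paths
+paths = path_retriever.follow_paths(
+    source_nodes=["aspirin_node_id"],
+    metapaths=metapaths
+)
+```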
+ +#### Architecture + +```python +from graphrag_toolkit.byokg_rag.graph_retrievers import PathRetriever + +path_retriever = PathRetriever( + graph_traversal=your_traversal_component, + path_verbalizer=your_path_verbalizer +) +``` + +#### Key methods + +**`follow_paths(source_nodes, metapaths)`** + +Follows predefined metapaths from source nodes. + +**`shortest_paths(source_nodes, target_nodes)`** + +Finds shortest paths between source and target nodes. + +**`retrieve(source_nodes, metapaths, target_nodes)`** + +Combines metapath traversal and shortest path finding. + +### Graph Query Retriever + +The Graph Query Retriever executes structured graph queries and verbalizes the results. + +#### Architecture + +```python +from graphrag_toolkit.byokg_rag.graph_retrievers import GraphQueryRetriever + +query_retriever = GraphQueryRetriever( + graph_store=your_graph_store +) +``` + +#### Key methods + +**`retrieve(graph_query, return_answers)`** + +Executes a graph query and returns verbalized results. + +Parameters: +- `graph_query` (str): The graph query to execute +- `return_answers` (bool): Whether to return answers along with results + +Returns: +- If `return_answers=True`: Tuple of (verbalized results, raw answers) +- If `return_answers=False`: List of verbalized results + +### Usage examples + +#### Entity linking example + +```python +# Initialize entity linker +entity_linker = EntityLinker(retriever=entity_matcher, topk=5) + +# Link extracted entities +extracted_entities = [["aspirin", "headache"], ["drug", "treatment"]] +linking_results = entity_linker.link( + query_extracted_entities=extracted_entities, + return_dict=True +) + +# Access linking results +for result in linking_results: + hits = result['hits'] + for hit in hits: + entity_ids = hit['document_id'] + entities = hit['document'] + scores = hit['match_score'] +``` + +#### Agentic retrieval example + +```python +# Initialize agentic retriever +agentic_retriever = AgenticRetriever( + llm_generator=llm, + graph_traversal=traversal, + graph_verbalizer=verbalizer, + max_num_iterations=3 +) + +# Perform iterative exploration +query = "What are the side effects of aspirin?" +source_nodes = ["aspirin_node_id"] +retrieved_triplets = agentic_retriever.retrieve( + query=query, + source_nodes=source_nodes +) +``` + +#### Multi-strategy combination + +```python +# Combine multiple retrievers for comprehensive coverage +retrievers = { + 'agentic': AgenticRetriever(...), + 'scoring': GraphScoringRetriever(...), + 'path': PathRetriever(...), + 'query': GraphQueryRetriever(...) +} + +# Use different retrievers based on query type +def retrieve_with_strategy(query, source_nodes, strategy='agentic'): + retriever = retrievers[strategy] + return retriever.retrieve(query, source_nodes) diff --git a/docs-site/src/content/docs/byokg-rag/indexing.mdx b/docs-site/src/content/docs/byokg-rag/indexing.mdx new file mode 100644 index 00000000..d08d1503 --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/indexing.mdx @@ -0,0 +1,248 @@ +--- +title: Indexing +--- + +## Overview + +Indexing in byokg-rag enables efficient entity linking by mapping natural language mentions to knowledge graph nodes. The system supports three complementary index types that work together to match entities from user queries to graph entities with varying degrees of precision and semantic understanding. + +Entity linking is a critical step in Knowledge Graph Question Answering (KGQA). 
When a user asks a question, the system must identify which entities in the knowledge graph are relevant. Indexes provide fast lookup mechanisms to find candidate entities based on string similarity, semantic meaning, or direct graph storage. + +This document covers: + +- Dense indexes for semantic similarity matching +- Fuzzy string indexes for approximate string matching +- Graph-store indexes for embedding-based retrieval directly from Neptune Analytics +- Guidance on selecting the appropriate index for your use case + +## Dense Index + +### Purpose + +Dense indexes use embeddings to find entities based on semantic similarity rather than exact string matches. This approach captures meaning and context, allowing the system to link entities even when the query uses different wording than the entity labels in the graph. + +### Architecture + +The dense index stores vector embeddings of entity labels and uses similarity search to find the closest matches to a query embedding. The system supports local FAISS-based indexes for development and testing. + +**LocalFaissDenseIndex** provides an in-memory vector index using FAISS (Facebook AI Similarity Search). It computes embeddings for entity labels and stores them in a FAISS index structure that enables fast approximate nearest neighbor search. + +### AWS Services + +Dense indexes require an embedding model to generate vector representations. The system integrates with: + +- **Amazon Bedrock** - Provides access to foundation models for generating embeddings + +### IAM Permissions + +To use dense indexes with Amazon Bedrock embeddings, you need the following IAM permissions: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "bedrock:InvokeModel" + ], + "Resource": "arn:aws:bedrock:::foundation-model/*" + } + ] +} +``` + +NOTE: Replace `` with your AWS region (e.g., us-west-2). + +### Configuration + +Configure a local FAISS dense index: + +```python +from graphrag_toolkit.byokg_rag.indexing import LocalFaissDenseIndex, LangChainEmbedding +from langchain_aws import BedrockEmbeddings + +# Set up embedding model +bedrock_embeddings = BedrockEmbeddings( + model_id="amazon.titan-embed-text-v2:0", + region_name="" +) +embedding = LangChainEmbedding(bedrock_embeddings) + +# Create dense index +dense_index = LocalFaissDenseIndex( + embedding=embedding, + distance_type="l2", # Options: "l2", "cosine" + embedding_dim=1024 # Must match embedding model dimension +) + +# Add entities to index +entities = ["Albert Einstein", "Marie Curie", "Isaac Newton"] +dense_index.add(entities) + +# Query the index +results = dense_index.query("physicist who developed relativity", topk=3) +``` + +**Parameters:** + +- `embedding` - Embedding instance that generates vector representations +- `distance_type` - Distance metric for similarity ("l2" or "cosine") +- `embedding_dim` - Dimension of embedding vectors (must match model output) + +## Fuzzy String Index + +### Purpose + +Fuzzy string indexes handle variations in entity names through approximate string matching. This approach is effective for typos, abbreviations, and minor spelling differences without requiring embeddings or semantic understanding. + +### Architecture + +The fuzzy string index uses the `thefuzz` library to compute string similarity scores between query text and entity labels. It supports configurable matching thresholds and can filter candidates based on string length differences. 
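+
+To give a feel for the kind of similarity scores involved, the snippet below calls `thefuzz` directly. It is not part of the library's API, just an illustration of why near-miss spellings still link correctly:
+
+```python
+from thefuzz import fuzz
+
+# A misspelled mention still scores highly against the correct entity label,
+# so it can be linked despite the typo
+score = fuzz.ratio("Albert Einstien", "Albert Einstein")
+print(score)  # a high score (roughly 90 out of 100)
+```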
+ +**FuzzyStringIndex** provides fast approximate string matching using Levenshtein distance and other string similarity algorithms. It maintains an in-memory mapping of entity labels and returns matches ranked by similarity score. + +### Configuration + +Configure a fuzzy string index: + +```python +from graphrag_toolkit.byokg_rag.indexing import FuzzyStringIndex + +# Create fuzzy string index +fuzzy_index = FuzzyStringIndex() + +# Add entities to index +entities = ["Albert Einstein", "Marie Curie", "Isaac Newton"] +fuzzy_index.add(entities) + +# Query with fuzzy matching +results = fuzzy_index.match( + inputs=["Albert Einstien", "Mary Curie"], # Note: typos + topk=1, + max_len_difference=4 +) +``` + +**Parameters:** + +- `topk` - Number of top matches to return per query +- `max_len_difference` - Maximum allowed length difference between query and candidate +- `id_selector` - Optional function to filter candidates before matching + +TIP: Fuzzy string matching works best for entity names with consistent structure. For highly variable entity descriptions, consider using dense indexes instead. + +## Graph Store Index + +### Purpose + +Graph-store indexes store embeddings directly in the graph database, eliminating the need for separate index infrastructure. This approach is available for Amazon Neptune Analytics, which supports vector storage and similarity search natively. + +### Architecture + +**NeptuneAnalyticsGraphStoreIndex** stores entity embeddings as node properties in Neptune Analytics and uses the graph database's built-in vector search capabilities. This provides a unified storage layer for both graph structure and semantic embeddings. + +### AWS Services + +Graph-store indexes require: + +- **Amazon Neptune Analytics** - Graph database with native vector search support +- **Amazon Bedrock** - Embedding model for generating vectors +- **Amazon S3** - Storage for embedding data during bulk loading + +### IAM Permissions + +To use graph-store indexes with Neptune Analytics, you need: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "neptune-graph:ReadDataViaQuery", + "neptune-graph:GetGraph" + ], + "Resource": "arn:aws:neptune-graph:::graph/" + }, + { + "Effect": "Allow", + "Action": [ + "bedrock:InvokeModel" + ], + "Resource": "arn:aws:bedrock:::foundation-model/*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], + "Resource": "arn:aws:s3:::/*" + } + ] +} +``` + +NOTE: Replace ``, ``, ``, and `` with your specific values. 
+ +### Configuration + +Configure a Neptune Analytics graph-store index: + +```python +from graphrag_toolkit.byokg_rag.graphstore import NeptuneAnalyticsGraphStore +from graphrag_toolkit.byokg_rag.indexing import NeptuneAnalyticsGraphStoreIndex, LangChainEmbedding +from langchain_aws import BedrockEmbeddings + +# Set up graph store +graph_store = NeptuneAnalyticsGraphStore( + graph_identifier="", + region="" +) + +# Set up embedding model +bedrock_embeddings = BedrockEmbeddings( + model_id="amazon.titan-embed-text-v2:0", + region_name="" +) +embedding = LangChainEmbedding(bedrock_embeddings) + +# Create graph-store index +graph_index = NeptuneAnalyticsGraphStoreIndex( + graphstore=graph_store, + embedding=embedding, + distance_type="l2", + embedding_s3_save_path="s3:///embeddings/" +) + +# Query the index +results = graph_index.query("physicist who developed relativity", topk=3) +``` + +**Parameters:** + +- `graphstore` - NeptuneAnalyticsGraphStore instance +- `embedding` - Embedding instance for generating vectors +- `distance_type` - Distance metric for similarity ("l2" or "cosine") +- `embedding_s3_save_path` - S3 path for storing embeddings during bulk operations + +## Index Selection Guide + +Choose the appropriate index type based on your requirements: + +| Index Type | Best For | Pros | Cons | +|------------|----------|------|------| +| Dense Index | Semantic matching, paraphrases, synonyms | Captures meaning, handles varied wording | Requires embedding model, higher latency | +| Fuzzy String Index | Typos, abbreviations, exact name variations | Fast, no external dependencies | Limited to string similarity, no semantic understanding | +| Graph Store Index | Neptune Analytics deployments, unified storage | No separate index infrastructure, integrated with graph | Requires Neptune Analytics, S3 for bulk loading | + +**Recommendations:** + +- Use **fuzzy string index** as the default for most applications. It provides good performance with minimal setup. +- Add **dense index** when queries use varied terminology or when entity labels are inconsistent. +- Use **graph-store index** when deploying on Neptune Analytics to simplify infrastructure. +- Combine multiple indexes for comprehensive coverage. The entity linker can use multiple indexes in sequence. + +TIP: Start with fuzzy string matching and add semantic indexes only if you observe poor entity linking performance in testing. diff --git a/docs-site/src/content/docs/byokg-rag/multi-strategy-retrieval.mdx b/docs-site/src/content/docs/byokg-rag/multi-strategy-retrieval.mdx new file mode 100644 index 00000000..87c01475 --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/multi-strategy-retrieval.mdx @@ -0,0 +1,235 @@ +--- +title: Multi-Strategy Retrieval +--- + +BYOKG-RAG implements a multi-strategy retrieval approach that combines different methods for extracting relevant information from knowledge graphs. The system uses iterative processing with LLM-guided entity extraction, path discovery, and query generation to provide comprehensive question answering. + +### Topics + + - [Overview](#overview) + - [Retrieval strategies](#retrieval-strategies) + - [Iterative processing](#iterative-processing) + - [Context management](#context-management) + - [Task completion](#task-completion) + - [Configuration](#configuration) + +### Overview + +The multi-strategy retrieval system in BYOKG-RAG operates through the `ByoKGQueryEngine` using the `KGLinker` component. 
Unlike traditional single-pass retrieval systems, it employs an iterative approach that builds context progressively through multiple retrieval strategies: + +1. **Entity Extraction and Linking** - Identifies and links entities from natural language to graph nodes +2. **Agentic Triplet Retrieval** - Uses LLM-guided exploration to find relevant triplets +3. **Path-based Retrieval** - Follows metapaths between entities to discover relationships +4. **Query-based Retrieval** - Executes structured graph queries (Cypher, SPARQL) + +### Retrieval strategies + +#### Entity extraction and linking + +The system begins by extracting entities from the natural language question using the KGLinker: + +```python +# Extract entities using LLM +artifacts = kg_linker.parse_response(response) +if "entity-extraction" in artifacts: + linked_entities = entity_linker.link(artifacts["entity-extraction"], return_dict=False) +``` + +**Process:** +1. LLM extracts entities from the question +2. EntityLinker matches extracted entities to graph nodes using fuzzy string matching +3. Linked entities serve as starting points for graph traversal + +**Entity linking strategies:** +- **Fuzzy string matching** - Default approach using string similarity +- **Semantic similarity** - Optional direct query linking using embeddings +- **Exact matching** - Direct node ID or label matching + +#### Agentic triplet retrieval + +Uses the `AgenticRetriever` to perform LLM-guided exploration of the knowledge graph: + +```python +if source_entities and triplet_retriever: + triplet_context = triplet_retriever.retrieve(query, source_entities) + self._add_to_context(retrieved_context, triplet_context) +``` + +**Characteristics:** +- **Iterative exploration** - Makes decisions at each step based on current context +- **Relevance-guided** - Uses LLM to select most relevant relations and entities +- **Context-aware** - Builds upon previously retrieved information +- **Early termination** - Stops when sufficient information is found + +#### Path-based retrieval + +Extracts and follows metapaths between entities to discover multi-hop relationships: + +```python +if "path-extraction" in artifacts and explored_entities and path_retriever: + metapaths = [[component.strip() for component in path.split("->")] + for path in artifacts["path-extraction"]] + path_context = path_retriever.retrieve(list(explored_entities), metapaths, linked_answers) +``` + +**Features:** +- **Metapath extraction** - LLM identifies relevant path patterns +- **Structured traversal** - Follows predefined relationship sequences +- **Multi-hop reasoning** - Connects entities through intermediate nodes +- **Path verbalization** - Converts graph paths to natural language + +#### Query-based retrieval + +Executes structured graph queries generated by the LLM: + +```python +for query_type in ["opencypher", "opencypher-neptune-rdf", "opencypher-neptune"]: + if query_type in artifacts and graph_query_executor: + graph_query = " ".join(artifacts[query_type]) + context = graph_query_executor.retrieve(graph_query, return_answers=False) +``` + +**Supported query types:** +- **OpenCypher** - Standard Cypher queries for property graphs +- **OpenCypher Neptune RDF** - Neptune-specific RDF queries +- **OpenCypher Neptune** - Neptune-optimized Cypher queries + +### Iterative processing + +The multi-strategy retrieval operates through iterative refinement: + +#### Iteration flow + +```python +for iteration in range(iterations): + # Use different prompts for different iterations + if iteration 
== 0: + task_prompts = self.kg_linker_prompts + else: + task_prompts = self.kg_linker_prompts_iterative + + # Generate response with accumulated context + response = self.kg_linker.generate_response( + question=query, + schema=self.schema, + graph_context="\n".join(retrieved_context), + task_prompts=task_prompts + ) +``` + +#### Progressive context building + +Each iteration builds upon the previous context: + +1. **First iteration** - Uses standard task prompts with no prior context +2. **Subsequent iterations** - Uses iterative prompts with accumulated context +3. **Context accumulation** - New information is added to existing context +4. **Deduplication** - Prevents redundant information from being added + +#### Adaptive prompting + +The system uses different prompt strategies: + +- **Initial prompts** (`kg_linker_prompts`) - Designed for fresh question analysis +- **Iterative prompts** (`kg_linker_prompts_iterative`) - Optimized for context-aware refinement + +### Context management + +#### Context aggregation + +The system maintains context through the `_add_to_context` method: + +```python +def _add_to_context(self, context_list: List[str], new_items: List[str]) -> None: + """Add new items to context list while maintaining order and avoiding duplicates.""" + seen = set(context_list) + for item in new_items: + if item not in seen: + context_list.append(item) + seen.add(item) +``` + +**Features:** +- **Deduplication** - Prevents redundant information +- **Order preservation** - Maintains chronological order of discovery +- **Incremental building** - Adds new information progressively + +#### Entity tracking + +The system tracks explored entities across iterations: + +```python +explored_entities: Set[str] = set() +# Update with newly linked entities +explored_entities.update(linked_entities) +``` + +This prevents redundant exploration and enables progressive discovery. 
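+
+The snippet below is a small, self-contained sketch of that bookkeeping, not the engine's actual code; the node IDs are invented:
+
+```python
+explored_entities = set()
+
+def select_new_entities(linked_entities):
+    # Keep only entities that haven't been explored in an earlier iteration
+    fresh = [e for e in linked_entities if e not in explored_entities]
+    explored_entities.update(fresh)
+    return fresh
+
+select_new_entities(["aspirin_node_id", "warfarin_node_id"])  # both new -> explored
+select_new_entities(["aspirin_node_id"])                      # already explored -> []
+```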
+ +### Task completion + +#### Completion detection + +The system monitors for completion signals in LLM responses: + +```python +task_completion = parse_response(response, r"(.*?)") +if "FINISH" in " ".join(task_completion): + break +``` + +#### Early termination + +The retrieval process can terminate early when: +- LLM indicates task completion with "FINISH" signal +- Sufficient information has been gathered +- Maximum iterations reached + +#### Completion strategies + +- **Explicit completion** - LLM explicitly signals completion +- **Implicit completion** - No new entities or information found +- **Iteration limit** - Maximum iteration count reached + +### Configuration + +#### Iteration control + +```python +# Configure iteration counts +context = query_engine.query( + question="Your question here", + iterations=3 # Number of multi-strategy iterations +) +``` + +#### Direct query linking + +Enable semantic similarity-based entity linking: + +```python +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + direct_query_linking=True # Enable semantic entity linking +) +``` + +#### Custom component configuration + +```python +# Custom triplet retriever configuration +from graph_retrievers import AgenticRetriever + +custom_triplet_retriever = AgenticRetriever( + llm_generator=llm, + graph_traversal=traversal, + graph_verbalizer=verbalizer, + max_num_iterations=4, + max_num_relations=10 +) + +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + triplet_retriever=custom_triplet_retriever +) +``` diff --git a/docs-site/src/content/docs/byokg-rag/overview.mdx b/docs-site/src/content/docs/byokg-rag/overview.mdx new file mode 100644 index 00000000..9854e817 --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/overview.mdx @@ -0,0 +1,95 @@ +--- +title: Overview +--- + +The graphrag-toolkit [byokg-rag](https://github.com/awslabs/graphrag-toolkit/blob/main/byokg-rag/) library provides a framework for Knowledge Graph Question Answering (KGQA) that combines Large Language Models (LLMs) with existing knowledge graphs. The system allows applications to bring their own knowledge graph and perform complex question answering using multiple retrieval strategies. + + - [Graph stores and model providers](#graph-stores-and-model-providers) + - [Multi-strategy retrieval](#multi-strategy-retrieval) + - [Agentic retrieval](#agentic-retrieval) + - [Scoring-based retrieval](#scoring-based-retrieval) + - [Path-based retrieval](#path-based-retrieval) + - [Query-based retrieval](#query-based-retrieval) + - [System components](#system-components) + - [Query processing](#query-processing) + - [Getting started](#getting-started) + +### Graph stores and model providers + +The byokg-rag library depends on two backend systems: a _graph store_ and a _foundation model provider_. The graph store manages the knowledge graph data structure and provides interfaces for graph traversal and querying. The foundation model provider hosts the Large Language Models (LLMs) used for question understanding, entity linking, and answer generation. + +The library supports Amazon Neptune graph databases that provide schema information and query execution capabilities. The default LLM provider is Amazon Bedrock with Claude 3.5 Sonnet, though it can be extended to support other LLM providers. + +### Multi-strategy retrieval + +The byokg-rag library implements a multi-strategy approach to information retrieval from knowledge graphs. 
Unlike traditional single-strategy approaches, it combines four complementary retrieval methods to provide comprehensive coverage of relevant information. + +#### Agentic retrieval + +Agentic retrieval uses LLM-powered agents to dynamically explore the knowledge graph based on the question context. The agents make decisions about which graph paths to follow, adapting their exploration strategy based on intermediate findings. This approach is particularly effective for complex, multi-step reasoning tasks where the optimal retrieval path cannot be predetermined. + +#### Scoring-based retrieval + +Scoring-based retrieval assigns relevance scores to graph triplets based on their relationship to the user question. The system uses scoring functions relying on semantic similarity. Triplets (edges) are ranked by their scores, and the top-k results are selected for answer generation. + +#### Path-based retrieval + +Path-based retrieval focuses on multi-hop reasoning by following structured paths through the knowledge graph. The system identifies relevant metapath patterns and traverses the graph following these patterns to connect entities through intermediate nodes. This approach is effective for questions that require understanding complex relationships and dependencies between entities. + +#### Query-based retrieval + +Query-based retrieval translates natural language questions into structured graph queries (e.g., Cypher, SPARQL) and executes them directly against the knowledge graph. This approach provides precise, efficient access to specific information when the question can be mapped to well-defined query patterns. + +### System components + +The byokg-rag framework consists of several key components: + +1. **Graph Store** ([src/graphrag_toolkit/byokg_rag/graphstore](https://github.com/awslabs/graphrag-toolkit/blob/main/byokg-rag/src/graphrag_toolkit/byokg_rag/graphstore)) - Manages the knowledge graph data structure and provides interfaces for graph traversal and querying. + +2. **Graph Connectors** ([src/graphrag_toolkit/byokg_rag/graph_connectors](https://github.com/awslabs/graphrag-toolkit/blob/main/byokg-rag/src/graphrag_toolkit/byokg_rag/graph_connectors)) - Links natural language queries to graph entities and paths using LLMs. Includes KGLinker for basic linking functionality and CypherKGLinker for Cypher-specific operations. + +3. **Graph Retrievers** ([src/graphrag_toolkit/byokg_rag/graph_retrievers](https://github.com/awslabs/graphrag-toolkit/blob/main/byokg-rag/src/graphrag_toolkit/byokg_rag/graph_retrievers)) - Implements the various retrieval strategies: + - **Entity Linker** - Matches entities from text to graph nodes using exact matching, fuzzy string matching, and semantic similarity + - **Triplet Retriever** - Retrieves relevant triplets from the graph and verbalizes them in natural language format + - **Path Retriever** - Finds paths between entities following metapath patterns for structured traversal + +4. **Query Engine** ([src/graphrag_toolkit/byokg_rag/byokg_query_engine.py](https://github.com/awslabs/graphrag-toolkit/blob/main/byokg-rag/src/graphrag_toolkit/byokg_rag/byokg_query_engine.py)) - Orchestrates all components to process natural language questions and generate answers based on retrieved information. + +### Query processing + +Query processing in byokg-rag follows an iterative pipeline through the `ByoKGQueryEngine`: + +1. **Initialization** - Set up context lists and entity tracking for the retrieval process +2. 
**Direct Query Linking** (optional) - Use semantic similarity to link the query directly to graph entities +3. **Cypher-based Retrieval** (if CypherKGLinker provided) - Generate and execute Cypher queries with iterative refinement +4. **Multi-Strategy Retrieval** (if KGLinker provided) - Use iterative LLM-guided retrieval: + - Extract entities from natural language using LLM + - Link extracted entities to graph nodes using fuzzy string matching + - Retrieve triplets using agentic exploration from source entities + - Follow metapaths extracted by LLM between entities + - Execute structured graph queries generated by LLM +5. **Context Management** - Combine results with deduplication and order preservation +6. **Task Completion** - Monitor for LLM completion signals or reach maximum iterations + +The system's performance has been evaluated across multiple knowledge graph benchmarks: + +| KGQA Hit (%) | Wiki-KG | Temp-KG | Med-KG | +|--------------|---------|---------|--------| +| Agent | 77.8 | 57.3 | 59.2 | +| BYOKG-RAG | 80.1 | 65.5 | 65.0 | + +### Getting started + +You can get started with byokg-rag by installing the package and running the demo notebook: + +```bash +pip install https://github.com/awslabs/graphrag-toolkit/archive/refs/tags/v3.15.5.zip#subdirectory=byokg-rag +``` + +The repository includes several [example notebooks](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/) that demonstrate how to use the library with different graph stores and datasets: + +- [Local Graph Demo](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/byokg_rag_demo_local_graph.ipynb) - Getting started with local graph databases +- [Neptune Analytics Demo](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/byokg_rag_neptune_analytics_demo.ipynb) - Using Amazon Neptune Analytics +- [Neptune Analytics with Cypher](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/byokg_rag_neptune_analytics_demo_cypher.ipynb) - Cypher-based retrieval with Neptune Analytics +- [Neptune Database Demo](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/byokg_rag_neptune_db_cluster_demo.ipynb) - Using Amazon Neptune Database clusters +- [Neptune Analytics Embeddings](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/byokg-rag/byokg_rag_neptune_analytics_embeddings.ipynb) - Working with embeddings in Neptune Analytics diff --git a/docs-site/src/content/docs/byokg-rag/query-engine.mdx b/docs-site/src/content/docs/byokg-rag/query-engine.mdx new file mode 100644 index 00000000..1f3aa778 --- /dev/null +++ b/docs-site/src/content/docs/byokg-rag/query-engine.mdx @@ -0,0 +1,299 @@ +--- +title: Query Engine +--- + +The `ByoKGQueryEngine` is the central orchestrating component that coordinates graph connectors, retrievers, and LLMs to process natural language questions and generate answers from knowledge graphs. It handles the high-level flow of query processing while delegating LLM-specific tasks to the KG Linker. + +### Overview + +The `ByoKGQueryEngine` orchestrates the interaction between multiple components to answer questions over knowledge graphs. It supports two main modes of operation: + +1. **Cypher-based retrieval** - Uses CypherKGLinker for direct query generation and execution +2. 
**Multi-strategy retrieval** - Uses KGLinker with multiple retrieval strategies (agentic, path-based, query-based) + +The engine can operate with either mode independently or combine both approaches for comprehensive question answering. + +### Architecture + +The query engine integrates the following components: + +- **Graph Store** - Provides access to graph data and schema information +- **Entity Linker** - Links natural language entities to graph nodes +- **Triplet Retriever** - Retrieves relevant triplets using agentic exploration +- **Path Retriever** - Finds and verbalizes paths between entities +- **Graph Query Executor** - Executes structured graph queries +- **KG Linker** - Handles LLM-based entity extraction and query understanding +- **Cypher KG Linker** - Specialized for Cypher query generation (optional) + +### Initialization + +#### Basic initialization + +```python +from graphrag_toolkit.byokg_rag.byokg_query_engine import ByoKGQueryEngine + +# Minimal initialization (uses defaults) +query_engine = ByoKGQueryEngine(graph_store=your_graph_store) +``` + +#### Full initialization with custom components + +```python +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + entity_linker=custom_entity_linker, + triplet_retriever=custom_triplet_retriever, + path_retriever=custom_path_retriever, + graph_query_executor=custom_query_executor, + llm_generator=custom_llm, + kg_linker=custom_kg_linker, + cypher_kg_linker=custom_cypher_linker, + direct_query_linking=False +) +``` + +#### Default component initialization + +When components are not provided, the engine initializes defaults: + +**Entity Linker**: Uses `FuzzyStringIndex` with all graph nodes +```python +from indexing import FuzzyStringIndex +from graph_retrievers import EntityLinker + +string_index = FuzzyStringIndex() +string_index.add(graph_store.nodes()) +entity_retriever = string_index.as_entity_matcher() +entity_linker = EntityLinker(entity_retriever) +``` + +**Triplet Retriever**: Uses `AgenticRetriever` with graph traversal +```python +from graph_retrievers import AgenticRetriever, GTraversal, TripletGVerbalizer + +graph_traversal = GTraversal(graph_store) +graph_verbalizer = TripletGVerbalizer() +triplet_retriever = AgenticRetriever( + llm_generator=llm_generator, + graph_traversal=graph_traversal, + graph_verbalizer=graph_verbalizer +) +``` + +**Path Retriever**: Uses `PathRetriever` with path verbalization +```python +from graph_retrievers import PathRetriever, GTraversal, PathVerbalizer + +graph_traversal = GTraversal(graph_store) +path_verbalizer = PathVerbalizer() +path_retriever = PathRetriever( + graph_traversal=graph_traversal, + path_verbalizer=path_verbalizer +) +``` + +### Query processing + +#### Main query method + +```python +def query(self, query: str, iterations: int = 2, cypher_iterations: int = 2) -> Tuple[List[str], List[str]] +``` + +The `query` method processes questions through the retrieval pipeline and returns retrieved context. + +**Parameters:** +- `query` (str): The search query +- `iterations` (int): Number of retrieval iterations for multi-strategy approach (default: 2) +- `cypher_iterations` (int): Number of Cypher generation retries (default: 2) + +**Returns:** +- Tuple of (retrieved context, final answers) as lists of strings + +#### Query processing flow + +1. **Initialize context** - Set up empty context lists and entity tracking +2. **Direct query linking** (optional) - Use semantic similarity for initial entity linking +3. 
**Cypher-based retrieval** (if CypherKGLinker provided) - Generate and execute Cypher queries +4. **Multi-strategy retrieval** (if KGLinker provided) - Use iterative entity extraction and retrieval +5. **Context aggregation** - Combine results from all strategies + +### Cypher-based retrieval + +When a `cypher_kg_linker` is provided, the engine performs Cypher-based retrieval: + +#### Process flow + +1. **Generate Cypher response** - Use CypherKGLinker to generate linking and query artifacts +2. **Execute linking queries** - Process `opencypher-linking` artifacts for entity discovery +3. **Execute main queries** - Process `opencypher` artifacts for answer retrieval +4. **Handle failures** - Provide feedback for failed queries to improve subsequent iterations +5. **Iterate** - Repeat for specified number of `cypher_iterations` + +#### Error handling + +The engine provides feedback for failed Cypher queries: + +```python +if len(answers) == 0: + cypher_context_with_feedback.append( + "No executable results for the above. Please improve cypher generation " + "in the future by focusing more on the given schema and the relations " + "between node types." + ) +``` + +### Multi-strategy retrieval + +When a `kg_linker` is provided, the engine performs multi-strategy retrieval: + +#### Iterative process + +1. **Generate LLM response** - Use KGLinker to extract entities, paths, and queries +2. **Link entities** - Connect extracted entities to graph nodes +3. **Retrieve triplets** - Use AgenticRetriever for contextual triplet extraction +4. **Process paths** - Follow extracted metapaths between entities +5. **Execute queries** - Run structured graph queries (Cypher, SPARQL) +6. **Check completion** - Stop if task completion signal is detected + +#### Task completion + +The engine checks for completion signals in LLM responses: + +```python +task_completion = parse_response(response, r"(.*?)") +if "FINISH" in " ".join(task_completion): + break +``` + +#### Iterative prompting + +The engine uses different prompts for different iterations: +- **First iteration**: Uses standard task prompts +- **Subsequent iterations**: Uses iterative prompts that build on previous context + +### Usage examples + +#### Basic usage + +```python +# Initialize with graph store +query_engine = ByoKGQueryEngine(graph_store=graph_store) + +# Process a question +question = "What are the side effects of aspirin?" 
+context = query_engine.query(question) + +print("Retrieved context:") +for item in context: + print(f"- {item}") +``` + +#### Cypher-focused usage + +```python +from graph_connectors import CypherKGLinker + +# Initialize with Cypher support +cypher_linker = CypherKGLinker(llm_generator, graph_store) +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + cypher_kg_linker=cypher_linker +) + +# Process question with Cypher iterations +question = "Find all drugs that interact with aspirin" +context = query_engine.query(question, cypher_iterations=3) +``` + +#### Multi-strategy with custom components + +```python +# Custom entity linker with semantic similarity +from indexing import SemanticIndex +semantic_index = SemanticIndex(embedding_model) +semantic_index.add(graph_store.nodes()) +entity_linker = EntityLinker(semantic_index.as_entity_matcher()) + +# Initialize with custom components +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + entity_linker=entity_linker, + direct_query_linking=True # Enable semantic entity linking +) + +# Process with multiple iterations +context = query_engine.query(question, iterations=3) +``` + +#### Response generation + +```python +# Generate final response from retrieved context +question = "What causes headaches?" +context = query_engine.query(question) + +# Generate answer using retrieved context +answers, full_response = query_engine.generate_response( + query=question, + graph_context="\n".join(context) +) + +print("Generated answers:") +for answer in answers: + print(f"- {answer}") +``` + +#### Combining both approaches + +```python +# Initialize with both KG Linker and Cypher KG Linker +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + kg_linker=kg_linker, + cypher_kg_linker=cypher_linker +) + +# The engine will first try Cypher-based retrieval, +# then fall back to multi-strategy retrieval +context = query_engine.query(question) +``` + +### Configuration options + +#### Direct query linking + +Enable semantic similarity-based entity linking: + +```python +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + direct_query_linking=True +) +``` + +#### Custom LLM configuration + +```python +from llm import BedrockGenerator + +custom_llm = BedrockGenerator( + model_name='us.anthropic.claude-3-5-sonnet-20240620-v1:0', + region_name='us-west-2' +) + +query_engine = ByoKGQueryEngine( + graph_store=graph_store, + llm_generator=custom_llm +) +``` + +#### Iteration control + +```python +# Fine-tune iteration counts for different strategies +context = query_engine.query( + question, + iterations=2, # Multi-strategy iterations + cypher_iterations=2 # Cypher retry iterations +) diff --git a/docs-site/src/content/docs/index.mdx b/docs-site/src/content/docs/index.mdx new file mode 100644 index 00000000..9375d10b --- /dev/null +++ b/docs-site/src/content/docs/index.mdx @@ -0,0 +1,67 @@ +--- +title: GraphRAG Toolkit +description: A Python toolkit for building graph-enhanced generative AI applications on AWS. +template: splash +hero: + title: | + Graph-enhanced
+ retrieval for
+ generative AI. + tagline: An open-source Python toolkit from AWS Labs for building hierarchical lexical graphs and querying them with multi-strategy retrieval. + actions: + - text: Get started + link: /graphrag-toolkit/lexical-graph/overview/ + icon: right-arrow + variant: primary + - text: View on GitHub + link: https://github.com/awslabs/graphrag-toolkit + icon: external + variant: minimal +--- + +import { Card, CardGrid, Code } from '@astrojs/starlight/components'; + + + + Automate the construction of a hierarchical lexical graph from unstructured documents, then query it with semantic-guided or traversal-based retrieval. + + + Bring your own knowledge graph. Plug an existing graph into a multi-strategy KGQA pipeline without re-extracting anything. + + + Graph stores: Amazon Neptune (DB and Analytics), Neo4j, FalkorDB. Vector stores: Neptune, OpenSearch, Postgres, S3 Vectors. + + + Apache 2.0, developed in the open by AWS Labs. + + + +## Install + + + +## Quickstart + + diff --git a/docs-site/src/content/docs/lexical-graph/aws-profile.mdx b/docs-site/src/content/docs/lexical-graph/aws-profile.mdx new file mode 100644 index 00000000..c4b93bb0 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/aws-profile.mdx @@ -0,0 +1,113 @@ +--- +title: AWS Profile Configuration +--- + +This guide explains how to configure and use **AWS named profiles** in the lexical-graph by leveraging the `GraphRAGConfig` class. + +## What is an AWS Profile? + +AWS CLI and SDKs allow the use of named profiles to manage different sets of credentials. Each profile typically contains: +- Access key ID +- Secret access key +- (Optional) Session token +- (Optional) Default region + +These profiles are stored in: +- `~/.aws/credentials` +- `~/.aws/config` + +--- + +## How `GraphRAGConfig` Uses AWS Profiles + +### 1. **Automatic Detection** +If no profile is explicitly provided, `GraphRAGConfig` attempts to use: +```python +os.environ.get("AWS_PROFILE") +``` + +If that's not set, it will fall back to the default AWS behavior. + +--- + +### 2. **Explicit Profile Setting** + +You can programmatically set a profile: + +```python +from graphrag_toolkit.config import GraphRAGConfig + +GraphRAGConfig.aws_profile = "padmin" +``` + +This automatically resets any previously cached clients or sessions to ensure all AWS service interactions use the new credentials. + +--- + +### 3. **Where Profiles are Used** + +When you call: + +```python +GraphRAGConfig.session +``` + +or use properties like: + +```python +GraphRAGConfig.bedrock +GraphRAGConfig.s3 +GraphRAGConfig.rds +``` + +the SDK creates the respective clients using the active profile and region. + +--- + +## Example with Environment Variables + +You can export the profile and region before running your app: + +```bash +export AWS_PROFILE=padmin +export AWS_REGION=us-east-1 +python my_app.py +``` + +Or set them inline: + +```bash +AWS_PROFILE=padmin AWS_REGION=us-east-1 python my_app.py +``` + +--- + +## Profile-Based Multi-Account Testing + +To test across AWS accounts: +```python +GraphRAGConfig.aws_profile = "dev-profile" +GraphRAGConfig.aws_region = "us-west-2" + +bedrock = GraphRAGConfig.bedrock # Will use dev-profile in us-west-2 +``` + +--- + +## Common Pitfalls + +- **Missing Profile**: Ensure the profile exists in `~/.aws/credentials` and is not misspelled. +- **Access Denied**: Check IAM permissions for the services you're trying to access. +- **Region mismatch**: Bedrock may only be available in specific regions (e.g., `us-east-1`). 
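+
+If you suspect one of the first two pitfalls, a quick way to confirm that a named profile resolves to working credentials is to build a session explicitly and call STS. The profile and region below are only examples:
+
+```python
+import boto3
+
+# Python equivalent of `aws sts get-caller-identity` for a specific named profile
+session = boto3.Session(profile_name="padmin", region_name="us-east-1")
+identity = session.client("sts").get_caller_identity()
+print(identity["Account"], identity["Arn"])
+```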
+ +--- + +## Summary + +| Use Case | How to Do It | +|-----------------------------|------------------------------------------------------------| +| Default profile | Rely on environment variables or default config | +| Programmatic override | `GraphRAGConfig.aws_profile = "my-profile"` | +| Switch regions | `GraphRAGConfig.aws_region = "us-east-2"` | +| Full override | Set both profile and region before invoking `.session` | +| Create boto3 clients | Use `.bedrock`, `.s3`, or `.rds` properties | diff --git a/docs-site/src/content/docs/lexical-graph/batch-extraction.mdx b/docs-site/src/content/docs/lexical-graph/batch-extraction.mdx new file mode 100644 index 00000000..2395aa3e --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/batch-extraction.mdx @@ -0,0 +1,189 @@ +--- +title: Batch Extraction +--- + +### Topics + + - [Overview](#overview) + - [Using batch inference with the LexicalGraphIndex](#using-batch-inference-with-the-lexicalgraphindex) + - [Setup](#setup) + - [Batch extraction job requirements](#batch-extraction-job-requirements) + +### Overview + +You can use [Amazon Bedrock batch inference](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference.html) in the extract stage of the indexing process to improve extraction performance for large datasets. + +See [Configuring Batch Extraction](/graphrag-toolkit/lexical-graph/configuring-batch-extraction/) for details on configuring batch extraction for large ingests. + +### Using batch inference with the LexicalGraphIndex + +To use batch inference in the extract stage of the indexing process, create a `BatchConfig` object and supply it to the `LexicalGraphIndex` as part of the [`IndexingConfig`](/graphrag-toolkit/lexical-graph/indexing/#configuring-the-extract-and-build-stages): + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph import GraphRAGConfig, IndexingConfig +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.indexing.extract import BatchConfig + +from llama_index.core import SimpleDirectoryReader + +def batch_extract_and_load(): + + GraphRAGConfig.extraction_batch_size = 1000 + + batch_config = BatchConfig( + region='us-west-2', + bucket_name='my-bucket', + key_prefix='batch-extract', + role_arn='arn:aws:iam::111111111111:role/my-batch-inference-role', + max_batch_size=40000 + ) + + indexing_config = IndexingConfig(batch_config=batch_config) + + with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store + ): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + indexing_config=indexing_config + ) + + reader = SimpleDirectoryReader(input_dir='path/to/directory') + docs = reader.load_data() + + graph_index.extract_and_build(docs, show_progress=True) + +batch_extract_and_load() +``` + +When using batch extraction, update the `GraphRAGConfig.extraction_batch_size` configuration parameter so that a large number of source documents are passed to a batch inference job in a single batch. In the example above, `GraphRAGConfig.extraction_batch_size` has been set to `1000`, meaning that 1000 source documents will be chunked simultaneously, and these chunks then sent to the batch inference job. 
If there are 10-50 chunks per document, the batch inference job here will process several thousand records in a single batch, up to a maximum of 40,000 records (the configured `max_batch_size` value).
+
+### Setup
+
+Before running batch extraction for the first time, you must fulfill the following prerequisites:
+
+ - Create an Amazon S3 bucket in the AWS Region where you will be running batch extraction
+ - [Create a custom service role for batch inference](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-iam-sr.html) with access to the S3 bucket (and permission to invoke an inference profile, if necessary)
+ - Update the IAM identity under which the indexing process runs to allow it to [submit and manage batch inference jobs](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-prereq.html#batch-inference-permissions) and pass the custom service role to Bedrock
+
+In the examples below, replace `<account-id>` with your AWS account ID, `<region>` with the name of the AWS Region where you will be running batch extraction, `<bucket-name>` with the name of your S3 bucket, `<model-id>` with the ID of the foundation model in Amazon Bedrock that you want to use for batch extraction, and `<batch-inference-role-arn>` with the ARN of your new custom service role.
+
+#### Custom service role
+
+[Create a custom service role for batch inference](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-iam-sr.html) with the following trust relationship:
+
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Service": "bedrock.amazonaws.com"
+            },
+            "Action": "sts:AssumeRole",
+            "Condition": {
+                "StringEquals": {
+                    "aws:SourceAccount": "<account-id>"
+                },
+                "ArnEquals": {
+                    "aws:SourceArn": "arn:aws:bedrock:<region>:<account-id>:model-invocation-job/*"
+                }
+            }
+        }
+    ]
+}
+```
+
+Create and attach a policy to your custom service role that [allows access to the Amazon S3 bucket where batch inference input and output files will be stored](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-iam-sr.html#batch-iam-sr-identity):
+
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:GetObject",
+                "s3:ListBucket",
+                "s3:PutObject"
+            ],
+            "Resource": [
+                "arn:aws:s3:::<bucket-name>",
+                "arn:aws:s3:::<bucket-name>/*"
+            ],
+            "Condition": {
+                "StringEquals": {
+                    "aws:ResourceAccount": [
+                        "<account-id>"
+                    ]
+                }
+            }
+        }
+    ]
+}
+```
+
+To run batch inference with an inference profile, the service role [must have permissions to invoke the inference profile in an AWS Region](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-iam-sr.html#batch-iam-sr-ip), in addition to the model in each Region in the inference profile.
+
+#### Update IAM identity
+
+You will also need to update the IAM identity under which the indexing process runs (not the custom service role) to allow it to [submit and manage batch inference jobs](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-prereq.html#batch-inference-permissions):
+
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        ...
+
+        {
+            "Effect": "Allow",
+            "Action": [
+                "bedrock:CreateModelInvocationJob",
+                "bedrock:GetModelInvocationJob",
+                "bedrock:ListModelInvocationJobs",
+                "bedrock:StopModelInvocationJob"
+            ],
+            "Resource": [
+                "arn:aws:bedrock:<region>::foundation-model/<model-id>",
+                "arn:aws:bedrock:<region>:<account-id>:model-invocation-job/*"
+            ]
+        }
+    ]
+}
+```
+
+Add the `iam:PassRole` permission so that the IAM identity under which the indexing process runs can pass the custom service role to Bedrock:
+
+```
+{
+    "Effect": "Allow",
+    "Action": [
+        "iam:PassRole"
+    ],
+    "Resource": "<batch-inference-role-arn>"
+}
+```
+
+### Batch extraction job requirements
+
+Each batch extraction job must follow Amazon Bedrock's [batch inference quotas](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference-data.html). The lexical-graph's batch extraction feature uses one input file per job.
+
+#### Key requirements
+
+ - Each batch job needs 100-50,000 records
+ - Jobs with fewer than 100 records are processed individually, not in batch
+ - The feature doesn't check input file sizes — jobs will fail if they exceed Bedrock quotas
+
+#### Worker configuration
+
+Batch extraction can use multiple workers that trigger concurrent batch jobs:
+
+ - If (workers × concurrent batches) exceeds Bedrock quotas, jobs will wait until capacity is available
diff --git a/docs-site/src/content/docs/lexical-graph/configuration.mdx b/docs-site/src/content/docs/lexical-graph/configuration.mdx
new file mode 100644
index 00000000..794f7bfe
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/configuration.mdx
@@ -0,0 +1,304 @@
+---
+title: Configuration
+---
+
+import { Aside } from '@astrojs/starlight/components';
+import StoreUrlBuilder from '../../../components/StoreUrlBuilder.tsx';
+
+## Graph store connection string builder
+
+Pick a backend and fill in the fields — this is the same URL you'll pass to `GraphStoreFactory.for_graph_store(...)`.
+
+
+
+## Overview
+
+The lexical-graph provides a `GraphRAGConfig` object that allows you to configure the LLMs and embedding models used by the indexing and retrieval processes, as well as the parallel and batch processing behaviours of the indexing pipelines. (The lexical-graph doesn't use the LlamaIndex `Settings` object: attributes configured in `Settings` will have no impact in the graphrag-toolkit.)
+
+The lexical-graph also allows you to set the logging level and apply logging filters from within your application.
+
+### GraphRAGConfig
+
+`GraphRAGConfig` is a module-level singleton (not a class to instantiate). It is created once at import time ([`config.py`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/config.py#L1171)) and shared across the process. Set attributes directly on the imported object:
+
+```python
+from graphrag_toolkit.lexical_graph import GraphRAGConfig
+
+GraphRAGConfig.aws_region = 'eu-west-1'
+GraphRAGConfig.extraction_llm = 'anthropic.claude-3-5-sonnet-20241022-v2:0'
+```
+
+Setting `aws_profile` or `aws_region` automatically clears all cached boto3 clients.
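+
+For example, switching the Region mid-process means that the next client you access is created against the new settings. A minimal sketch, using the `bedrock` client property described in [AWS Profile Configuration](/graphrag-toolkit/lexical-graph/aws-profile/):
+
+```python
+from graphrag_toolkit.lexical_graph import GraphRAGConfig
+
+GraphRAGConfig.aws_region = 'us-west-2'
+
+# The assignment above cleared any cached clients, so this client is
+# created for us-west-2 using the currently active profile.
+bedrock = GraphRAGConfig.bedrock
+```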
+ + + +The configuration includes the following parameters: + +| Parameter | Description | Default Value | Environment Variable | +| ------------- | ------------- | ------------- | ------------- | +| `extraction_llm` | LLM used to perform graph extraction (see [LLM configuration](#llm-configuration)) | `us.anthropic.claude-3-7-sonnet-20250219-v1:0` | `EXTRACTION_MODEL` | +| `response_llm` | LLM used to generate responses (see [LLM configuration](#llm-configuration)) | `us.anthropic.claude-3-7-sonnet-20250219-v1:0` | `RESPONSE_MODEL` | +| `embed_model` | Embedding model used to generate embeddings for indexed data and queries (see [Embedding model configuration](#embedding-model-configuration)) | `cohere.embed-english-v3` | `EMBEDDINGS_MODEL` | +| `embed_dimensions` | Number of dimensions in each vector | `1024` | `EMBEDDINGS_DIMENSIONS` | +| `extraction_num_workers` | The number of parallel processes to use when running the extract stage | `2` | `EXTRACTION_NUM_WORKERS` | +| `extraction_num_threads_per_worker` | The number of threads used by each process in the extract stage | `4` | `EXTRACTION_NUM_THREADS_PER_WORKER` | +| `extraction_batch_size` | The number of input nodes to be processed in parallel across all workers in the extract stage | `4` | `EXTRACTION_BATCH_SIZE` | +| `build_num_workers` | The number of parallel processes to use when running the build stage | `2` | `BUILD_NUM_WORKERS` | +| `build_batch_size` | The number of input nodes to be processed in parallel across all workers in the build stage | `4` | `BUILD_BATCH_SIZE` | +| `build_batch_write_size` | The number of elements to be written in a bulk operation to the graph and vector stores (see [Batch writes](#batch-writes)) | `25` | `BUILD_BATCH_WRITE_SIZE` | +| `batch_writes_enabled` | Determines whether, on a per-worker basis, to write all elements (nodes and edges, or vectors) emitted by a batch of input nodes as a bulk operation, or singly, to the graph and vector stores (see [Batch writes](#batch-writes)) | `True` | `BATCH_WRITES_ENABLED` | +| `include_domain_labels` | Determines whether entities will have a domain-specific label (e.g. 
`Company`) as well as the [graph model's](/graphrag-toolkit/lexical-graph/graph-model/#entity-relationship-tier) `__Entity__` label | `False` | `INCLUDE_DOMAIN_LABELS` | +| `include_local_entities` | Whether to include local-context entities in the graph | `False` | `INCLUDE_LOCAL_ENTITIES` | +| `include_classification_in_entity_id` | Whether to include an entity's classification in its graph node id | `True` | `INCLUDE_CLASSIFICATION_IN_ENTITY_ID` | +| `enable_versioning` | Whether to enable versioned updates (see [Versioned Updates](/graphrag-toolkit/lexical-graph/versioned-updates/)) | `False` | `ENABLE_VERSIONING` | +| `enable_cache` | Determines whether the results of LLM calls to models on Amazon Bedrock are cached to the local filesystem (see [Caching Amazon Bedrock LLM responses](#caching-amazon-bedrock-llm-responses)) | `False` | `ENABLE_CACHE` | +| `aws_profile` | AWS CLI named profile used to authenticate requests to Bedrock and other services | *None* | `AWS_PROFILE` | +| `aws_region` | AWS region used to scope Bedrock service calls | *Default boto3 session region* | `AWS_REGION` | + +The following parameters configure the rerankers used by query retrievers: + +| Parameter | Description | Default | Environment Variable | +| ------------- | ------------- | ------------- | ------------- | +| `reranking_model` | Local reranker model (mixedbread-ai) | `mixedbread-ai/mxbai-rerank-xsmall-v1` | `RERANKING_MODEL` | +| `bedrock_reranking_model` | Amazon Bedrock reranker model | `cohere.rerank-v3-5:0` | `BEDROCK_RERANKING_MODEL` | + +The following parameter applies only when using Amazon OpenSearch Serverless as a vector store: + +| Parameter | Description | Default | Environment Variable | +| ------------- | ------------- | ------------- | ------------- | +| `opensearch_engine` | OpenSearch kNN engine | `nmslib` | `OPENSEARCH_ENGINE` | + +The following parameters configure local filesystem paths for container/EKS deployments: + +| Parameter | Description | Default | Environment Variable | +| ------------- | ------------- | ------------- | ------------- | +| `local_output_dir` | Local staging directory for batch files and temporary extraction outputs | `output` | `LOCAL_OUTPUT_DIR` | +| `log_output_dir` | Directory prefix for log files (when filename is relative) | *None* | `LOG_OUTPUT_DIR` | + +To set a configuration parameter in your application code: + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig + +GraphRAGConfig.response_llm = 'anthropic.claude-3-haiku-20240307-v1:0' +GraphRAGConfig.extraction_num_workers = 4 +``` + +You can also set any of these via environment variables using the variable names in the tables above. + +#### LLM configuration + +The `extraction_llm` and `response_llm` configuration parameters accept three different types of value: + + - You can pass an instance of a LlamaIndex `LLM` object. This allows you to configure the lexical-graph for LLM backends other than Amazon Bedrock. + - You can pass the model id of an Amazon Bedrock model or [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles.html). For example: `anthropic.claude-3-7-sonnet-20250219-v1:0` (model id) or `us.anthropic.claude-3-7-sonnet-20250219-v1:0` (inference profile). + - You can pass a JSON string representation of a LlamaIndex `BedrockConverse` instance. 
For example: + + ``` + { + "model": "anthropic.claude-3-7-sonnet-20250219-v1:0", + "temperature": 0.0, + "max_tokens": 4096 + } + ``` + +#### Embedding model configuration + +The `embed_model` configuration parameter accepts three different types of value: + + - You can pass an instance of a LlamaIndex `BaseEmbedding` object. This allows you to configure the lexical-graph for embedding backends other than Amazon Bedrock. + - You can pass the model name of an Amazon Bedrock model. For example: `amazon.titan-embed-text-v1`. + - You can pass a JSON string representation of a LlamaIndex `BedrockEmbedding` instance. For example: + + ``` + { + "model_name": "amazon.titan-embed-text-v2:0" + } + ``` + +When configuring an embedding model, you must also set the `embed_dimensions` configuration parameter to match the model's output dimensions. For example: + +```python +GraphRAGConfig.embed_model = '{"model_name": "amazon.titan-embed-text-v2:0"}' +GraphRAGConfig.embed_dimensions = 512 +``` + +##### Nova 2 Multimodal Embeddings + +Amazon Nova 2 multimodal embedding models (`amazon.nova-2-multimodal-embeddings-v1:0`) use a different API format than standard Bedrock embedding models. To use Nova 2 models, you must explicitly import and instantiate the `Nova2MultimodalEmbedding` class. + +**Usage:** + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig +from graphrag_toolkit.lexical_graph.utils.bedrock_utils import Nova2MultimodalEmbedding + +GraphRAGConfig.embed_model = Nova2MultimodalEmbedding('amazon.nova-2-multimodal-embeddings-v1:0') +GraphRAGConfig.embed_dimensions = 3072 +``` + +**API Format Differences:** + +Standard Bedrock embeddings (Titan, Cohere) use: +```json +{"inputText": "text to embed"} +``` + +Nova 2 multimodal embeddings require: +```json +{ + "taskType": "SINGLE_EMBEDDING", + "singleEmbeddingParams": { + "embeddingDimension": 3072, + "embeddingPurpose": "TEXT_RETRIEVAL", + "text": { + "truncationMode": "END", + "value": "text to embed" + } + } +} +``` + +**Configuration Parameters:** + +| Parameter | Description | Default | Valid Values | +| --------- | ----------- | ------- | ------------ | +| `embed_dimensions` | Vector dimensions | `3072` | `1024`, `3072` | +| `embed_purpose` | Embedding optimization purpose | `TEXT_RETRIEVAL` | `TEXT_RETRIEVAL`, `GENERIC_RETRIEVAL`, `DOCUMENT_RETRIEVAL`, `CLASSIFICATION`, `CLUSTERING` | +| `truncation_mode` | How to handle text exceeding max length | `END` | `END`, `NONE` | + +**Advanced Configuration:** + +To configure Nova 2 multimodal embeddings with custom parameters: + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig +from graphrag_toolkit.lexical_graph.utils.bedrock_utils import Nova2MultimodalEmbedding + +embedding = Nova2MultimodalEmbedding( + model_name='amazon.nova-2-multimodal-embeddings-v1:0', + embed_dimensions=3072, + embed_purpose='TEXT_RETRIEVAL', + truncation_mode='END' +) + +GraphRAGConfig.embed_model = embedding +GraphRAGConfig.embed_dimensions = 3072 +``` + +**Features:** +- Handles Nova 2's unique API format automatically +- Includes retry logic for transient Bedrock errors +- Custom pickle support for multiprocessing scenarios +- Lazy client initialization using GraphRAGConfig.session +- Empty text validation to prevent API errors + +#### Batch writes + +The lexical-graph uses microbatching to progress source data through the extract and build stages. 
+
+ - In the extract stage a batch of source nodes is processed in parallel by one or more workers, with each worker performing chunking, proposition extraction and topic/statement/fact/entity extraction over its allocated source nodes. For a given batch of source nodes, the extract stage emits a collection of chunks derived from those source nodes.
+ - In the build stage, chunks from the extract stage are broken down into smaller *indexable* nodes representing sources, chunks, topics, statements and facts. These indexable nodes are then processed by the graph construction and vector indexing handlers.
+
+The `batch_writes_enabled` configuration parameter determines whether all of the indexable nodes derived from a batch of incoming chunks are written to the graph and vector stores singly, or as a bulk operation. Bulk/batch operations tend to improve the throughput of the build stage, at the expense of some additional latency before this data becomes available to query.
+
+#### Caching Amazon Bedrock LLM responses
+
+If you're using Amazon Bedrock, you can use the local filesystem to cache and reuse LLM responses. Set `GraphRAGConfig.enable_cache` to `True`. LLM responses will then be saved in clear text to a `cache` directory. Subsequent invocations of the same model with the exact same prompt will return the cached response.
+
+Note that streaming responses from the query engine are _not_ cached.
+
+The `cache` directory can grow very large, particularly if you are caching extraction responses for a very large ingest. The lexical-graph will not manage the size of this directory or delete old entries. If you enable the cache, ensure you clear or prune the cache directory regularly.
+
+### Logging configuration
+
+The `graphrag_toolkit` provides two methods for configuring logging in your application. These methods allow you to set logging levels, apply filters to include or exclude specific modules or messages, and customize logging behavior:
+
+- `set_logging_config`
+- `set_advanced_logging_config`
+
+#### set_logging_config
+
+The `set_logging_config` method allows you to configure logging with a basic set of options, such as logging level and module filters. Wildcards are supported for module names, and you can pass either a single string or a list of strings for included or excluded modules. You can optionally provide a `filename` to write log output to a file in addition to stdout. For example:
+
+```python
+from graphrag_toolkit.lexical_graph import set_logging_config
+
+set_logging_config(
+    logging_level='DEBUG',  # or logging.DEBUG
+    debug_include_modules='graphrag_toolkit.lexical_graph.storage',  # single string or list of strings
+    debug_exclude_modules=['opensearch', 'boto'],  # single string or list of strings
+    filename='output.log'  # optional: also write logs to a file
+)
+```
+
+#### set_advanced_logging_config
+
+The `set_advanced_logging_config` method provides more advanced logging configuration options, including the ability to specify filters for included and excluded modules or messages based on logging levels. Wildcards are supported for module names and included messages, and you can pass either a single string or a list of strings for modules or messages. This method offers greater flexibility and control over the logging behavior.
+ +##### Parameters + +| Parameter | Type | Description | Default Value | +|---------------------|-------------------------------|---------------------------------------------------------------------------------------------|----------------| +| `logging_level` | `str` or `int` | The logging level to apply (e.g., `'DEBUG'`, `'INFO'`, `logging.DEBUG`, etc.). | `logging.INFO` | +| `included_modules` | `dict[int, str \| list[str]]` | Modules to include in logging, grouped by logging level. Wildcards are supported. | `None` | +| `excluded_modules` | `dict[int, str \| list[str]]` | Modules to exclude from logging, grouped by logging level. Wildcards are supported. | `None` | +| `included_messages` | `dict[int, str \| list[str]]` | Specific messages to include in logging, grouped by logging level. Wildcards are supported. | `None` | +| `excluded_messages` | `dict[int, str \| list[str]]` | Specific messages to exclude from logging, grouped by logging level. | `None` | +| `filename` | `str` | If provided, log output is also written to this file in addition to stdout. | `None` | + +##### Example Usage + +Here is an example of how to use `set_advanced_logging_config`: + +```python +import logging +from graphrag_toolkit.lexical_graph import set_advanced_logging_config + +set_advanced_logging_config( + logging_level=logging.DEBUG, + included_modules={ + logging.DEBUG: 'graphrag_toolkit', # single string or list of strings + logging.INFO: '*', # wildcard supported + }, + excluded_modules={ + logging.DEBUG: ['opensearch', 'boto', 'urllib'], # single string or list of strings + logging.INFO: ['opensearch', 'boto', 'urllib'], # wildcard supported + }, + excluded_messages={ + logging.WARNING: 'Removing unpickleable private attribute', # single string or list of strings + } +) +``` + +### AWS profile configuration + +You can explicitly configure the AWS CLI profile and region to use when initializing Bedrock clients or other AWS service clients in `GraphRAGConfig`. This ensures compatibility across local development, EC2/ECS environments, or federated environments such as AWS SSO. + +You may set the AWS profile and region in your application code: + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig + +GraphRAGConfig.aws_profile = 'padmin' +GraphRAGConfig.aws_region = 'us-east-1' +``` + +Alternatively, use environment variables: + +```bash +export AWS_PROFILE=padmin +export AWS_REGION=us-east-1 +``` + +If no profile or region is set explicitly, the system falls back to environment variables or the default AWS CLI configuration. + +See [Using AWS Profiles in `GraphRAGConfig`](/graphrag-toolkit/lexical-graph/aws-profile/) for more details on configuring and using AWS named profiles. + +#### Resilient clients and SSO token refresh + +All boto3 clients created by `GraphRAGConfig` are wrapped in a `ResilientClient` ([`config.py:94`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/config.py#L94)). On `ExpiredToken`, `RequestExpired`, or `InvalidClientTokenId` errors the client is refreshed automatically and the call is retried. + +When an AWS SSO profile is in use, the client wrapper also validates the SSO token age. If the token is more than one hour old, it runs `aws sso login` automatically before retrying. This is relevant for long-running indexing jobs and any environment where SSO sessions can expire mid-run. 
diff --git a/docs-site/src/content/docs/lexical-graph/configuring-batch-extraction.mdx b/docs-site/src/content/docs/lexical-graph/configuring-batch-extraction.mdx new file mode 100644 index 00000000..56fec07f --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/configuring-batch-extraction.mdx @@ -0,0 +1,94 @@ +--- +title: Configuring Batch Extraction +--- + +### Topics + + - [Overview](#overview) + - [BatchConfig parameters](#batchconfig-parameters) + - [Required parameters](#required-parameters) + - [bucket_name](#bucket_name) + - [region](#region) + - [role_arn](#role_arn) + - [Optional parameters](#optional-parameters) + - [key_prefix](#key_prefix) + - [max_batch_size](#max_batch_size) + - [max_num_concurrent_batches](#max_num_concurrent_batches) + - [s3_encryption_key_id](#s3_encryption_key_id) + - [VPC security parameters (optional)](#vpc-security-parameters-optional) + - [subnet_ids](#subnet_ids) + - [security_group_ids](#security_group_ids) + - [File management](#file-management) + - [delete_on_success](#delete_on_success) + - [Optimizing batch extraction performance](#optimizing-batch-extraction-performance) + +### Overview + +### BatchConfig parameters + +The `BatchConfig` object manages the configuration settings for Amazon Bedrock batch inference jobs. Here's a detailed explanation of each parameter: + +#### Required parameters + +##### `bucket_name` + +You must specify the name of an Amazon S3 bucket where your batch processing files (both input and output) will be stored. + +##### `region` + +You need to provide the AWS Region name (such as "us-east-1") where both your S3 bucket is located and where the Amazon Bedrock batch inference job will run. + +##### `role_arn` + +This is the Amazon Resource Name (ARN) for the service role that handles batch inference operations. You can either create a default service role through the console or follow the instructions in the [Create a service role for batch inference](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-iam-sr.html) documentation. + +#### Optional parameters + +##### `key_prefix` + +If desired, you can specify an S3 key prefix for organizing your input and output files. + +##### `max_batch_size` + +Controls how many records (chunks) can be included in each batch inference job. The default value is `25000` records. + +##### `max_num_concurrent_batches` + +Determines how many batch inference jobs can run simultaneously per worker. This setting works in conjunction with `GraphRAGConfig.extraction_num_workers`. The default is `3` concurrent batches per worker. + +##### `s3_encryption_key_id` + +You can provide the unique identifier for an encryption key to secure the output data in S3. + +#### VPC security parameters (optional) + +For more information about VPC protection, see [Protect batch inference jobs using a VPC](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-vpc). + +##### `subnet_ids` + +An array of subnet IDs within your Virtual Private Cloud (VPC) for protecting batch inference jobs. + +##### `security_group_ids` + +An array of security group IDs within your VPC for protecting batch inference jobs. + +#### File management + +##### `delete_on_success` + +Controls whether input and output JSON files are automatically deleted from the local filesystem after successful batch job completion. By default, this is set to `True`. Note that this setting does not affect files stored in S3, which are preserved regardless. 
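+
+Putting these together, a representative `BatchConfig` might look like the following sketch. The values are illustrative, and the import path matches the example in [Batch Extraction](/graphrag-toolkit/lexical-graph/batch-extraction/):
+
+```python
+from graphrag_toolkit.lexical_graph.indexing.extract import BatchConfig
+
+batch_config = BatchConfig(
+    # Required
+    region='us-east-1',
+    bucket_name='my-batch-bucket',
+    role_arn='arn:aws:iam::111111111111:role/my-batch-inference-role',
+    # Optional
+    key_prefix='batch-extract',
+    max_batch_size=25000,                  # records per batch inference job
+    max_num_concurrent_batches=3,          # concurrent jobs per worker
+    s3_encryption_key_id='my-kms-key-id',  # encrypt batch output in S3
+    delete_on_success=True,                # remove local input/output files after success
+    # VPC protection (optional)
+    subnet_ids=['subnet-0aaa1111', 'subnet-0bbb2222'],
+    security_group_ids=['sg-0ccc3333']
+)
+```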
+
+### Optimizing batch extraction performance
+
+The most important settings for controlling batch extraction performance are:
+
+ - `GraphRAGConfig.extraction_batch_size`: Sets how many source documents go to the extraction pipeline. When calculating this value, consider that the total number of chunks (source documents × average chunks per document) should be sufficient to fill your planned simultaneous batch jobs.
+ - `GraphRAGConfig.extraction_num_workers`: Sets how many CPUs run batch jobs simultaneously.
+ - `BatchConfig.max_num_concurrent_batches`: Sets how many concurrent batch jobs each worker runs.
+ - `BatchConfig.max_batch_size`: Sets the maximum number of chunks per batch job.
+
+To maximize the efficiency of batch extraction, follow these three key principles:
+
+ - **Maximize file capacity** Each batch job file can hold up to 50,000 records. However, Amazon Bedrock enforces input file size limits, typically between 1-5 GB. Check the **Batch inference job size** quotas in the [Amazon Bedrock service quotas section](https://docs.aws.amazon.com/general/latest/gr/bedrock.html#limits_bedrock) for the limits particular to the model you are using. Note that the toolkit doesn't automatically verify file sizes, so jobs may fail if they exceed these quotas. You may need to use fewer records than the maximum limit to stay within file size boundaries. Configure `BatchConfig.max_batch_size` to set the maximum number of records per batch job.
+ - **Use larger, fewer files** Focus on using a minimal number of large files rather than splitting the work across many smaller ones. For example, it's more efficient to process 40,000 records in a single job than to divide them into four parallel jobs of 10,000 records each.
+ - **Leverage parallel processing** Take advantage of parallel job execution using `GraphRAGConfig.extraction_num_workers` and `BatchConfig.max_num_concurrent_batches`. The total number of jobs (number of workers × number of concurrent batches) must stay within Bedrock's quota of 20 combined in-progress and submitted batch inference jobs per region. If you exceed this limit, additional jobs will wait in the queue until capacity becomes available.
diff --git a/docs-site/src/content/docs/lexical-graph/external-properties.mdx b/docs-site/src/content/docs/lexical-graph/external-properties.mdx
new file mode 100644
index 00000000..74f08ea8
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/external-properties.mdx
@@ -0,0 +1,85 @@
+---
+title: External Properties
+---
+
+## Overview
+
+Added a flexible external properties feature that allows adding any business-specific properties from source document metadata to chunk nodes in the graph database.
+
+## Changes Made
+
+### 1. Configuration (`lexical-graph/src/graphrag_toolkit/lexical_graph/config.py`)
+- Added `chunk_external_properties` property to `GraphRAGConfig`
+- Accepts dictionary mapping chunk property names to source metadata keys
+- Supports environment variable: `CHUNK_EXTERNAL_PROPERTIES` (JSON format)
+- Default: `None` (feature disabled)
+
+### 2. 
Chunk Node Builder (`lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/build/chunk_node_builder.py`) +- Extracts multiple properties from validated source metadata when configured +- Iterates through property mapping and adds each available property +- Adds to chunk metadata: `metadata['chunk']['metadata'][property_name]` (nested structure matching source metadata) +- Uses `_get_source_info_metadata()` to ensure only valid (non-collection-based) metadata is used + +### 3. Chunk Graph Builder (`lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/build/chunk_graph_builder.py`) +- Stores all external properties as properties on chunk nodes +- Reads from nested `metadata['chunk']['metadata']` dictionary +- Dynamically generates SET statements for each property +- Uses: `SET chunk.property_name = params.property_name` + +## Usage + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig +from llama_index.core.schema import Document + +# Configure multiple properties +GraphRAGConfig.chunk_external_properties = { + 'article_code': 'article_id', + 'document_type': 'doc_type', + 'department': 'dept_code' +} + +# Create document with metadata +doc = Document( + text="Your content...", + metadata={ + 'article_id': 'ART-2024-001', + 'doc_type': 'research', + 'dept_code': 'ENG' + } +) + +# Build graph - chunks will have all configured properties +``` + +## Query Examples + +```cypher +// Find chunks by article code +MATCH (chunk:__Chunk__ {article_code: 'ART-2024-001'}) +RETURN chunk + +// Find chunks by document type +MATCH (chunk:__Chunk__ {document_type: 'research'}) +RETURN chunk + +// Complex multi-property query +MATCH (chunk:__Chunk__) +WHERE chunk.document_type = 'research' + AND chunk.department = 'ENG' +RETURN chunk +``` + +## Files Modified + +- `lexical-graph/src/graphrag_toolkit/lexical_graph/config.py` +- `lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/build/chunk_node_builder.py` +- `lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/build/chunk_graph_builder.py` + +## Key Features + +- **Flexible**: Support any number of properties +- **Configurable**: Dictionary-based mapping +- **Graceful**: Handles missing metadata keys +- **Backward Compatible**: No breaking changes +- **Safe**: Uses validated source metadata to avoid write failures diff --git a/docs-site/src/content/docs/lexical-graph/faq.mdx b/docs-site/src/content/docs/lexical-graph/faq.mdx new file mode 100644 index 00000000..f585c6ac --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/faq.mdx @@ -0,0 +1,58 @@ +--- +title: FAQ +--- + + - [Errors and warnings](#errors-and-warnings) + - [ModelError: An error occurred (AccessDeniedException) when calling the InvokeModel operation: \ is not authorized to perform: bedrock:InvokeModel](#modelerror-an-error-occurred-accessdeniedexception-when-calling-the-invokemodel-operation-identity-is-not-authorized-to-perform-bedrockinvokemodel) + - [ModelError: An error occurred (AccessDeniedException) when calling the InvokeModel operation: You don't have access to the model with the specified model ID](#modelerror-an-error-occurred-accessdeniedexception-when-calling-the-invokemodel-operation-you-dont-have-access-to-the-model-with-the-specified-model-id) + - [WARNING:graph_store:Retrying query in x seconds because it raised ConcurrentModificationException](#warninggraph_storeretrying-query-in-x-seconds-because-it-raised-concurrentmodificationexception) + +### Errors and warnings + +#### ModelError: An error occurred (AccessDeniedException) 
when calling the InvokeModel operation: \ is not authorized to perform: bedrock:InvokeModel + +If the AWS Identity and Access Management (IAM) identity under which your application is running does not have permission to invoke an Amazon Bedrock foundation model, you will get an error similar to the following: + +``` +graphrag_toolkit.errors.ModelError: An error occurred (AccessDeniedException) when calling the InvokeModel operation: is not authorized to perform: bedrock:InvokeModel on resource: arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-5-haiku-20241022-v1:0 because no identity-based policy allows the bedrock:InvokeModel action [Model config: {"system_prompt": null, "pydantic_program_mode": "default", "model": "anthropic.claude-3-5-haiku-20241022-v1:0", "temperature": 0.0, "max_tokens": 4096, "context_size": 200000, "profile_name": null, "max_retries": 10, "timeout": 60.0, "additional_kwargs": {}, "class_name": "Bedrock_LLM"}] +``` + +To fix, ensure you have [enabled access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) to the appropriate foundation models in Amazon Bedrock, and then update the IAM policy associated with the identity: + +``` +{ + "Effect": "Allow", + "Action": [ + "bedrock:InvokeModel" + ], + "Resource": [ + "arn:aws:bedrock:us-west-2::foundation-model/anthropic.claude-3-5-haiku-20241022-v1:0" + ] +} +``` + +--- + +#### ModelError: An error occurred (AccessDeniedException) when calling the InvokeModel operation: You don't have access to the model with the specified model ID + +Access to Amazon Bedrock foundation models isn't granted by default. If you have not enabled access to a foundation model, you will get an error similar to the following: + +``` +graphrag_toolkit.errors.ModelError: An error occurred (AccessDeniedException) when calling the InvokeModel operation: You don't have access to the model with the specified model ID. [Model config: {"system_prompt": null, "pydantic_program_mode":"default", "model": "anthropic.claude-3-7-sonnet-20250219-v1:0", "temperature": 0.0, "max_tokens": 4096, "context_size": 200000, "profile_name": null, "max_retries": 10, "timeout": 60.0, "additional_kwargs": {}, "class_name": "Bedrock_LLM"}] +``` + +To fix, [enable access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) to the appropriate foundation models in Amazon Bedrock, and then [grant IAM permissions to the model](#modelerror-an-error-occurred-accessdeniedexception-when-calling-the-invokemodel-operation-identity-is-not-authorized-to-perform-bedrockinvokemodel). + +--- + +#### Importing the package patches llama_index async internals + +When you import `graphrag_toolkit.lexical_graph`, the package patches `llama_index.core.async_utils.asyncio_run` unconditionally ([`__init__.py:34`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/__init__.py#L34)). The patch makes LlamaIndex's internal async runner work inside Jupyter notebooks by re-using the existing event loop instead of creating a new one. If no running loop is found, it falls back to `asyncio.run()`. This can interact unexpectedly with other code using LlamaIndex in the same process, particularly if that code relies on `asyncio_run` starting a clean event loop. There is currently no opt-out. 
+
+---
+
+#### WARNING:graph_store:Retrying query in x seconds because it raised ConcurrentModificationException
+
+While indexing data in Amazon Neptune Database, Neptune can sometimes issue a `ConcurrentModificationException`. This occurs because multiple workers are attempting to [update the same set of vertices](https://docs.aws.amazon.com/neptune/latest/userguide/transactions-exceptions.html). The GraphRAG Toolkit automatically retries transactions that are cancelled because of a `ConcurrentModificationException`. If the maximum number of retries is exceeded and the indexing fails, consider reducing the number of workers in the build stage using [`GraphRAGConfig.build_num_workers`](/graphrag-toolkit/lexical-graph/configuration/#graphragconfig).
+
+---
diff --git a/docs-site/src/content/docs/lexical-graph/graph-model.mdx b/docs-site/src/content/docs/lexical-graph/graph-model.mdx
new file mode 100644
index 00000000..ad437e08
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/graph-model.mdx
@@ -0,0 +1,127 @@
+---
+title: Graph Model
+---
+
+### Topics
+
+ - [Overview](#overview)
+ - [A three-tiered lexical graph](#a-three-tiered-lexical-graph)
+ - [Units of context](#units-of-context)
+ - [Lineage tier](#lineage-tier)
+ - [Entity-Relationship tier](#entity-relationship-tier)
+ - [Summarisation tier](#summarisation-tier)
+ - [Facts](#facts)
+ - [Statements](#statements)
+ - [Topics](#topics)
+
+### Overview
+
+The lexical-graph uses a form of hierarchical [lexical graph](https://graphr.ag/reference/knowledge-graph/lexical-graph-hierarchical-structure/), auto-generated from unstructured sources, whose job is to help question-answering systems retrieve information which is *semantically dissimilar from the question*, but nonetheless *relevant to the answer*.
+
+#### A three-tiered lexical graph
+
+The lexical graph has three tiers:
+
+ - [**Lineage**](#lineage-tier) - Sources, chunks, and the relations between them.
+ - [**Summarisation**](#summarisation-tier) - Hierarchical summarisations and lexical units at different levels of granularity.
+ - [**Entity-Relationship**](#entity-relationship-tier) - Individual entities and relations extracted from the underlying sources.
+
+![Lexical graph](../../../content/images/lexical-graph.png)
+
+#### Units of context
+
+When using a lexical graph in a RAG application, the question arises: what size lexical unit should form the basis of the context?
+
+For many RAG applications, the primary unit of context is the *chunk*: that is, the context window is formed of one or more chunks retrieved from the corpus. Different chunking strategies produce differently sized chunks: there's no one-size-fits-all definition of a chunk. For the purpose of this documentation, however, we take a chunk to be something larger than an individual sentence, but smaller than an entire document.
+
+For the graphrag-toolkit, the primary unit of context is not the chunk, but the *statement*, which is a standalone assertion or proposition. Source documents are broken into chunks, and from these chunks are extracted statements. Statements are thematically grouped by topic, and supported by facts. At question-answering time, the lexical-graph [retrieves](/graphrag-toolkit/lexical-graph/querying/) groups of statements (and sometimes topics and/or facts), and presents them in the context window to the LLM.
+
+Graphs can help question-answering systems retrieve information which is semantically dissimilar from the question, but nonetheless relevant to the answer.
Retrieval through semantic similarity remains an important strategy, and context that is semantically similar to the question will often comprise the foundation of a good answer. But similarity-based retrieval is not always sufficient for generating a nuanced response, and in many circumstances it will also be necessary to find and return information that cannot be found by vector similarity search alone, in order to present the LLM with a more differentiated context that can help it develop comparisons, arguments, and summaries. The relationships in a graph provide a means by which a retrieval process can find this additional relevant information. + +Graph topology and the degree of connectivity in the graph play an important role in finding relevant information. If everything is linked to everything else, it becomes difficult to extract particularly relevant units of context from within a sea of irrelevancy. If, on the other hand, linking between elements in the graph is low, there are relatively few opportunities for discovering golden nuggets of relevant but nonetheless semantically dissimilar information. The graphrag-toolkit's graph model assigns local and global connectivity roles to different elements in the graph: topics provide thematic connectivity between statements derived from the same source; facts provide connectivity between statements derived from different sources. + +### Lineage tier + +This tier consists of `__Source__` nodes and `__Chunk__` nodes. A source node contains metadata describing a source document (e.g. author, URL, publication date). The exact metadata varies depending on the source. Chunks contain the actual chunked text (and its embedding). Chunks are linked to previous, next, parent and child chunks. + +### Entity-Relationship tier + +This consists of `__Entity__` nodes and `__RELATION__` relationships. Entities have a value (e.g. 'Amazon') and a classification (e.g. 'Company'). Relationships have a value (e.g. 'WORKS_FOR'). + +The entities in the entity-relationship tier act as entry points into the graph for bottom-up, keyword-based (exact match) searches. + +Every entity is associated with at least one `__Fact__`, either as a `__SUBJECT__` or `__OBJECT__`. Entities can fulfill multiple roles: an entity may act as the subject for one fact, and the object for another fact. + +Extraction uses a lightly guided strategy whereby the extraction process is seeded with a list of preferred entity classifications. The LLM is instructed to use an existing classification from the list before creating new ones. Any new classifications introduced by the LLM are then carried forward to subsequent invocations. This approach reduces but doesn't eliminate unwanted variations in entity classification. + +Relationship values are currently unguided (though relatively concise). + +### Summarisation tier + +This currently comprises `__Topic__`, `__Statement__` and `__Fact__` nodes. Proceeding from the bottom up: + +#### Facts + +A fact summarises a single triplet or triple-like unit of meaning. For example: + +``` +Property Graph model ACCESSED WITH openCypher +``` + +There are two types of fact: subject-predicate-object (SPO) facts, and subject-predicate-complement (SPC) facts. SPO facts are connected to entities in the subject and object positions. SPC facts are connected to subject entities only. 
Here's an example of an SPC fact: + +``` +Neptune Analytics PURPOSE analyze graph data +``` + +SPO facts are connected to other facts via `__NEXT__` relationships, where the object entity of a first fact acts as the subject entity for a subsequent fact. + +Facts provide *connectivity across different sources*. It's not uncommon for an individual fact to be mentioned multiple times in the underlying corpus: for example, in a news articles dataset, a particular fact might be repeated in different news articles reporting on the same story. In the graph, there will be a single node to represent this specific fact. From this node it is then possible to traverse via statements, topics and chunks to all the places where that particular fact is mentioned. + +Every fact `__SUPPORTS__` at least one statement. A fact can support multiple statements, belonging to the same or different topics and sources. + +Facts can, optionally, be embedded – and so as well as enhancing connectivity, they can also be used to provide a low-level, vector-based entry point into the graph. + +#### Statements + +A statement or assertion extracted from the underlying sources. Statements are the *primary unit of context returned to the question-answering LLM in the context window* – that is, the context comprises collections of statements grouped by source and topic. + +Statements are typically associated with one or more facts (both SPO and SPC facts). For example: + +``` +Statement +--------- +Neptune supports open graph APIs for property graphs (Gremlin and openCypher) and RDF graphs (SPARQL) + +Facts +----- +SPARQL FOR RDF graphs +SPARQL API FOR RDF graphs +openCypher API FOR property graphs +Gremlin FOR property graphs +Gremlin API FOR property graphs +openCypher FOR property graphs +``` + +In some circumstances a statement may include one or more contextual *details* in addition to, or instead of, any associated facts. These contextual details take the same triplet form as facts, but they lack any entity (subject or object) relations, and so are inlined as a property in the statement node. + +Statements are grouped beneath topics (see below). Within a particular topic, statements are also joined in a linked list via `__PREVIOUS__` relationships, making it easy to retrieve previous (and subsequent) statements belonging to the same underlying source. + +Statements act as the primary unit of context for question answering. They are connected transitively to other statements via both facts and topics. + +Statements can, optionally, be embedded, and so can act as higher-level entry points in the graph based on a vector search. Semantic-guided search uses statement embeddings to guide its search strategies. Statement embeddings also allow statements to be used in a 'baseline RAG' manner to retrieve relatively small pieces of context for answering simple questions. + +#### Topics + +A topic is a theme or area of focus within a specific source document. Source documents will typically have several topics. For example, one of the source documents in our Neptune documentation example has the following topics: + +``` +Neptune Analytics +Loading Graph Data into Amazon Neptune Analytics +``` + +Topics are scoped to individual source documents so as to provide connectivity across chunks within a single source. It's common for several chunks from the same source to be connected to the same topic. + +Topics increase *connectivity between relevant chunks within a single source*, and provide a simple document-level summary mechanism. 
+ +Topics can, optionally, be embedded, and so can act as higher-level entry points in the graph based on a vector search. A topic embedding represents the topic name and all the statements belonging to that topic. diff --git a/docs-site/src/content/docs/lexical-graph/graph-store-falkor-db.mdx b/docs-site/src/content/docs/lexical-graph/graph-store-falkor-db.mdx new file mode 100644 index 00000000..225037ad --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/graph-store-falkor-db.mdx @@ -0,0 +1,83 @@ +--- +title: FalkorDB Graph Store +--- + +### Topics + + - [Overview](#overview) + - [Install package](#install-package) + - [Registering FalkorDB as a graph store](#registering-falkordb-as-a-graph-store) + - [Creating a FalkorDB graph store](#creating-a-falkordb-graph-store) + +### Overview + +You can use FalkorDB as a graph store. + +### Install package + +The FalkorDB graph store is contained in a separate contributor package. To install it: + +``` +!pip install https://github.com/awslabs/graphrag-toolkit/archive/refs/tags/v3.15.5.zip#subdirectory=lexical-graph-contrib/falkordb +``` + +### Registering FalkorDB as a graph store + +Before creating a FalkorDB graph store, you must register the `FalkorDBGraphStoreFactory` with the `GraphStoreFactory`: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit_contrib.lexical_graph.storage.graph.falkordb import FalkorDBGraphStoreFactory + +GraphStoreFactory.register(FalkorDBGraphStoreFactory) + +``` + +### Creating a FalkorDB graph store + +You can use the `GraphStoreFactory.for_graph_store()` static factory method to create an instance of a FalkorDB graph store. + +The FalkorDB graph store currently supports [semantic-guided search](/graphrag-toolkit/lexical-graph/semantic-guided-search/). It does not support [traversal-based search](/graphrag-toolkit/lexical-graph/traversal-based-search/). + +To create a [FalkorDB Cloud](https://app.falkordb.cloud/) graph store, supply a connection string that begins `falkordb://`, followed by the FalkorDB endpoint: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit_contrib.lexical_graph.storage.graph.falkordb import FalkorDBGraphStoreFactory + +falkordb_connection_info = 'falkordb://your-falkordb-endpoint' + +GraphStoreFactory.register(FalkorDBGraphStoreFactory) + +with GraphStoreFactory.for_graph_store(falkordb_connection_info) as graph_store: + ... + +``` + +You may also need to pass a username and password, and specify whether or not to use SSL: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +falkordb_connection_info = 'falkordb://' + +with GraphStoreFactory.for_graph_store( + falkordb_connection_info, + username='', + password='', + ssl=True + ) as graph_store: + + ... +``` + +To create a local FalkorDB graph store, supply a connection string that has only `falkordb://`; + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +falkordb_connection_info = 'falkordb://' + +with GraphStoreFactory.for_graph_store(falkordb_connection_info) as graph_store: + ... 
+``` diff --git a/docs-site/src/content/docs/lexical-graph/graph-store-neo4j.mdx b/docs-site/src/content/docs/lexical-graph/graph-store-neo4j.mdx new file mode 100644 index 00000000..99728668 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/graph-store-neo4j.mdx @@ -0,0 +1,33 @@ +--- +title: Neo4j Graph Store +--- + +### Topics + + - [Overview](#overview) + - [Creating a Neo4j graph store](#creating-a-neo4j-graph-store) + +### Overview + +You can use [Neo4j](https://neo4j.com/docs) as a graph store. + +### Creating a Neo4j graph store + +Use the `GraphStoreFactory.for_graph_store()` static factory method to create an instance of a Neo4j graph store. + +To create a Neo4j graph store, supply a connection string that begins with one of the [Neo4j URI schemes](https://neo4j.com/docs/api/python-driver/5.28/api.html#uri) (e.g. `neo4j://`) in accordance with the following format: + +``` +[scheme]://[user[:password]@][host][:port][/dbname][?routing_context] +``` + +For example: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +neo4j_connection_info = 'neo4j://neo4j:!zfg%dGGh@example.com:7687' + +with GraphStoreFactory.for_graph_store(neo4j_connection_info) as graph_store: + ... +``` diff --git a/docs-site/src/content/docs/lexical-graph/graph-store-neptune-analytics.mdx b/docs-site/src/content/docs/lexical-graph/graph-store-neptune-analytics.mdx new file mode 100644 index 00000000..ad32108e --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/graph-store-neptune-analytics.mdx @@ -0,0 +1,27 @@ +--- +title: Neptune Analytics Graph Store +--- + +### Topics + + - [Overview](#overview) + - [Creating a Neptune Analytics graph store](#creating-a-neptune-analytics-graph-store) + +### Overview + +You can use Amazon Neptune Analytics as a graph store. + +### Creating a Neptune Analytics graph store + +Use the `GraphStoreFactory.for_graph_store()` static factory method to create an instance of a Neptune Analytics graph store. + +To create a Neptune Analytics graph store, supply a connection string that begins `neptune-graph://`, followed by the graph's identifier: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +neptune_connection_info = 'neptune-graph://g-jbzzaqb209' + +with GraphStoreFactory.for_graph_store(neptune_connection_info) as graph_store: + ... +``` diff --git a/docs-site/src/content/docs/lexical-graph/graph-store-neptune-db.mdx b/docs-site/src/content/docs/lexical-graph/graph-store-neptune-db.mdx new file mode 100644 index 00000000..a69188ab --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/graph-store-neptune-db.mdx @@ -0,0 +1,50 @@ +--- +title: Neptune Database Graph Store +--- + +### Topics + + - [Overview](#overview) + - [Creating a Neptune Database graph store](#creating-a-neptune-database-graph-store) + - [Connecting to Neptune via a proxy](#connecting-to-neptune-via-a-proxy) + +### Overview + +You can use Amazon Neptune Database as a graph store. The lexical-graph requires [Neptune engine version](https://docs.aws.amazon.com/neptune/latest/userguide/engine-releases.html) 1.4.1.0 or later. + +### Creating a Neptune Database graph store + +Use the `GraphStoreFactory.for_graph_store()` static factory method to create an instance of a Neptune Database graph store. 
+ +To create a Neptune Database graph store (engine version 1.4.1.0 or later), supply a connection string that begins `neptune-db://`, followed by an [endpoint](https://docs.aws.amazon.com/neptune/latest/userguide/feature-overview-endpoints.html): + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +neptune_connection_info = 'neptune-db://mydbcluster.cluster-123456789012.us-east-1.neptune.amazonaws.com:8182' + +with GraphStoreFactory.for_graph_store(neptune_connection_info) as graph_store: + ... +``` + +#### Connecting to Neptune via a proxy + +To connect to Neptune via a proxy (e.g. a load balancer), you must supply a config dictionary to the `GraphStoreFactory.for_graph_store()` factory method, with a `proxies` dictionary of proxy servers to use by protocol or endpoint: + +```python +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory + +neptune_connection_info = 'neptune-db://mydbcluster.cluster-123456789012.us-east-1.neptune.amazonaws.com:8182' + +config = { + 'proxies': { + 'http': 'http://proxy-hostname:80' + } +} + +with GraphStoreFactory.for_graph_store( + neptune_connection_info, + config=config + ) as graph_store: + ... +``` diff --git a/docs-site/src/content/docs/lexical-graph/hybrid-deployment.mdx b/docs-site/src/content/docs/lexical-graph/hybrid-deployment.mdx new file mode 100644 index 00000000..4445dd36 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/hybrid-deployment.mdx @@ -0,0 +1,47 @@ +--- +title: Hybrid Deployment +--- + +### Topics + + - [Overview](#overview) + - [Stores and model providers](#stores-and-model-providers) + - [Indexing and querying](#indexing-and-querying) + - [Indexing](#indexing) + +### Overview + +Hybrid deployment enables flexible deployment: high-throughput LLM inference via SageMaker and Bedrock, and cost-effective local development using containerized graph/vector stores. + +### Stores and model providers + +The `lexical-graph` library depends on three backend systems: a [*graph store*](/graphrag-toolkit/lexical-graph/storage-model/#graph-store), a [*vector store*](/graphrag-toolkit/lexical-graph/storage-model/#vector-store), and a *foundation model provider*. The graph store enables storage and querying of a lexical graph built from unstructured, text-based sources. The vector store contains one or more indexes with embeddings for selected graph elements, which help identify starting points for graph queries. The foundation model provider hosts the Large Language Models (LLMs) used for extraction and embedding. + +The library provides built-in support for: + +* Graph stores: [Amazon Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html), [Amazon Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html), and local [FalkorDB](https://falkordb.com/) (via Docker) +* Vector stores: [Amazon OpenSearch Serverless](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless.html), [PostgreSQL with `pgvector`](https://github.com/pgvector/pgvector), Neptune Analytics, and local [PostgreSQL with `pgvector`](https://github.com/pgvector/pgvector) +* Foundation model provider: [Amazon Bedrock](https://aws.amazon.com/bedrock/) + +This hybrid configuration enables flexible deployment: high-throughput LLM inference via SageMaker and Bedrock, and cost-effective local development using containerized graph/vector stores. 
+
+### Indexing and querying
+
+The lexical-graph library implements two high-level processes: [_indexing_](/graphrag-toolkit/lexical-graph/indexing/) and [_querying_](/graphrag-toolkit/lexical-graph/querying/). The indexing process ingests and extracts information from unstructured, text-based source documents and then builds a graph and accompanying vector indexes. The query process retrieves content from the graph and vector indexes, and then supplies this content as context to an LLM to answer a user question.
+
+#### Indexing
+
+Indexing is split into two pipeline stages: **Extract** and **Build**.
+
+The **Extract** stage runs **locally using Docker**:
+
+* Loads and chunks documents
+* Performs two LLM-based extraction steps:
+
+  * *Proposition extraction*: Converts chunked text into well-formed statements
+  * *Topic/entity/fact extraction*: Identifies relations and concepts
+* Stores the extracted results in an **AWS S3 bucket**, serving as the transport medium between stages
+
+The **Build** stage remains unchanged.
+
+![Indexing](../../../content/images/hybrid-extract-and-build.png)
diff --git a/docs-site/src/content/docs/lexical-graph/indexing.mdx b/docs-site/src/content/docs/lexical-graph/indexing.mdx
new file mode 100644
index 00000000..0bcba2bf
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/indexing.mdx
@@ -0,0 +1,413 @@
+---
+title: Indexing
+---
+
+import { Aside } from '@astrojs/starlight/components';
+
+## Overview
+
+There are two stages to indexing: extract and build. The lexical-graph uses separate pipelines for each of these stages, plus micro-batching, to provide a continuous ingest capability. This means that your graph will start being populated soon after extraction begins.
+
+You can run the extract and build pipelines together, to provide for the continuous ingest described above. Or you can run the two pipelines separately, extracting first to file-based chunks, and then later building a graph from these chunks.
+
+The `LexicalGraphIndex` allows you to run the extract and build pipelines together or separately. See the [Using the LexicalGraphIndex to construct a graph](#using-the-lexicalgraphindex-to-construct-a-graph) section below.
+
+Indexing supports [multi-tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/), whereby you can store separate lexical graphs in the same backend graph and vector stores.
+
+#### Extract
+
+The extraction stage is, by default, a three-step process:
+
+ 1. The source documents are broken down into chunks.
+ 2. For each chunk, an LLM extracts a set of propositions from the unstructured content. This proposition extraction helps 'clean' the content and improve the subsequent entity/topic/statement/fact extraction by breaking complex sentences into simpler sentences, replacing pronouns with specific names, and replacing acronyms where possible. These propositions are added to the chunk's metadata under the `aws::graph::propositions` key.
+ 3. Following the proposition extraction, a second LLM call extracts entities, relations, topics, statements and facts from the set of extracted propositions. These details are added to the chunk's metadata under the `aws::graph::topics` key.
+
+Only the third step here is mandatory. If your source data has already been chunked, you can omit step 1. If you're willing to accept a reduction in the quality of the entity/topic/statement/fact extraction in return for fewer LLM calls and improved performance, you can omit step 2.
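+
+As a point of reference, the outputs of steps 2 and 3 end up in each chunk's metadata under the keys named above. A minimal sketch of inspecting them, assuming `chunks` is a list of LlamaIndex nodes emitted by the extract stage:
+
+```python
+# Illustrative only: iterate over chunks emitted by the extract stage and
+# read the metadata written by the proposition and topic extraction steps.
+for chunk in chunks:
+    propositions = chunk.metadata.get('aws::graph::propositions')  # step 2 output
+    topics = chunk.metadata.get('aws::graph::topics')              # step 3 output
+    print(chunk.node_id, propositions is not None, topics is not None)
+```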
+
+Extraction uses a lightly guided strategy whereby the extraction process is seeded with a list of preferred entity classifications. The LLM is instructed to use an existing classification from the list before creating new ones. Any new classifications introduced by the LLM are then carried forward to subsequent invocations. This approach reduces but doesn't eliminate unwanted variations in entity classification.
+
+The list of `DEFAULT_ENTITY_CLASSIFICATIONS` used to seed the extraction process can be found [here](https://github.com/awslabs/graphrag-toolkit/blob/main/src/graphrag_toolkit/indexing/constants.py). If these classifications are not appropriate to your workload you can replace them (see the [Configuring the extract and build stages](#configuring-the-extract-and-build-stages) section below).
+
+Relationship values are currently unguided (though relatively concise).
+
+#### Build
+
+In the build stage, the LlamaIndex chunk nodes emitted from the extract stage are broken down further into a stream of individual source, chunk, topic, statement and fact LlamaIndex nodes. Graph construction and vector indexing handlers process these nodes to build and index the graph content. Each of these nodes has an `aws::graph::index` metadata item containing data that can be used to index the node in a vector store (though only the chunk and statement nodes are actually indexed in the current implementation).
+
+### Using the LexicalGraphIndex to construct a graph
+
+The `LexicalGraphIndex` provides a convenient means of constructing a graph – via either continuous ingest, or separate extract and build stages. When constructing a `LexicalGraphIndex` you must supply a graph store and a vector store (see [Storage Model](/graphrag-toolkit/lexical-graph/storage-model/) for more details). In the examples below, the graph store and vector store connection strings are fetched from environment variables.
+
+The `LexicalGraphIndex` constructor has an `extraction_dir` named argument. This is the path to a local directory to which intermediate artefacts (such as [checkpoints](#checkpoints)) will be written. By default, the value of `extraction_dir` is set to the value of `GraphRAGConfig.local_output_dir`, which defaults to `'output'`. For containerized deployments (EKS/Kubernetes), you can configure this via the `LOCAL_OUTPUT_DIR` environment variable or by setting `GraphRAGConfig.local_output_dir` programmatically. See [Configuration](/graphrag-toolkit/lexical-graph/configuration/) for more details.
+
+#### Continuous ingest
+
+Use `LexicalGraphIndex.extract_and_build()` to extract and build a graph in a manner that supports continuous ingest.
+
+The extraction stage consumes LlamaIndex nodes – either documents, which will be chunked during extraction, or pre-chunked text nodes. Use a LlamaIndex reader to [load source documents](https://docs.llamaindex.ai/en/stable/understanding/loading/loading/). The example below uses a LlamaIndex `SimpleWebPageReader` to load several HTML pages.
+ +```python title="continuous_ingest.py" {22,30} "extract_and_build" +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +from llama_index.readers.web import SimpleWebPageReader + +doc_urls = [ + 'https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-vs-neptune-database.html' +] + +docs = SimpleWebPageReader( + html_to_text=True, + metadata_fn=lambda url:{'url': url} +).load_data(doc_urls) + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + graph_index.extract_and_build(docs) +``` + + + +The diff below shows what changes when you split the pipelines: + +```diff lang="python" +- graph_index.extract_and_build(docs) ++ extracted_docs = S3BasedDocs( ++ region=os.environ['AWS_REGION'], ++ bucket_name=os.environ['EXTRACTION_BUCKET'], ++ key_prefix='extracted', ++ ) ++ graph_index.extract(docs, handler=extracted_docs, show_progress=True) ++ graph_index.build(extracted_docs, show_progress=True) +``` + +#### Run the extract and build stages separately + +Using the `LexicalGraphIndex` you can perform the extract and build stages separately. This is useful if you want to extract the graph once, and then build it multiple times (in different environments, for example.) + +When you run the extract and build stages separately, you can persist the extracted documents to Amazon S3 or to the filesystem at the end of the extract stage, and then consume these same documents in the build stage. Use the graphrag-toolkit's `S3BasedDocss` and `FileBasedDocs` classes to persist and then retrieve JSON-serialized LlamaIndex nodes. 
+ +The following example shows how to use a `S3BasedDocs` handler to persist extracted documents to an Amazon S3 bucket at the end of the extract stage: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.indexing.load import S3BasedDocs + +from llama_index.readers.web import SimpleWebPageReader + +extracted_docs = S3BasedDocs( + region='us-east-1', + bucket_name='my-bucket', + key_prefix='extracted', + collection_id='12345' +) + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + doc_urls = [ + 'https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html', + 'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-vs-neptune-database.html' + ] + + docs = SimpleWebPageReader( + html_to_text=True, + metadata_fn=lambda url:{'url': url} + ).load_data(doc_urls) + + graph_index.extract(docs, handler=extracted_docs) +``` + +Following the extract stage, you can then build the graph from the previously extracted documents. Whereas in the extract stage the `S3BasedDocs` object acted as a handler to persist extracted documents, in the build stage the `S3BasedDocs` object acts as a source of LlamaIndex nodes, and is thus passed as the first argument to the `build()` method: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.indexing.load import S3BasedDocs + +docs = S3BasedDocs( + region='us-east-1', + bucket_name='my-bucket', + key_prefix='extracted', + collection_id='12345' +) + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + graph_index.build(docs) +``` + +The `S3BasedDocs` object has the following parameters: + +| Parameter | Description | Mandatory | +| ------------- | ------------- | ------------- | +| `region` | AWS Region in which the S3 bucket is located (e.g. `us-east-1`) | Yes | +| `bucket_name` | Amazon S3 bucket name | Yes | +| `key_prefix` | S3 key prefix | Yes | +| `collection_id` | Id for a particular collection of extracted documents. Optional: if no `collection_id` is supplied, the lexical-graph will create a timestamp value. Extracted documents will be written to `s3://///`. | No | +| `s3_encryption_key_id` | KMS key id (Key ID, Key ARN, or Key Alias) to use for object encryption. Optional: if no `s3_encryption_key_id` is supplied, the lexical-graph will encrypt objects in S3 using Amazon S3 managed keys. | No | + +If you use Amazon Web Services KMS keys to encrypt objects in S3, the identity under which the lexical-graph runs should include the following IAM policy. 
Replace `<kms-key-arn>` with the ARN of the KMS key you want to use to encrypt objects:
+
+```
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Action": [
+                "kms:GenerateDataKey",
+                "kms:Decrypt"
+            ],
+            "Resource": [
+                "<kms-key-arn>"
+            ],
+            "Effect": "Allow"
+        }
+    ]
+}
+```
+
+If you want to persist extracted documents to the local filesystem instead of an S3 bucket, use a `FileBasedDocs` object instead:
+
+```python
+from graphrag_toolkit.lexical_graph.indexing.load import FileBasedDocs
+
+docs = FileBasedDocs(
+    docs_directory='./extracted/',
+    collection_id='12345'
+)
+```
+
+The `FileBasedDocs` object has the following parameters:
+
+| Parameter | Description | Mandatory |
+| ------------- | ------------- | ------------- |
+| `docs_directory` | Root directory for the extracted documents | Yes |
+| `collection_id` | Id for a particular collection of extracted documents. Optional: if no `collection_id` is supplied, the lexical-graph will create a timestamp value. Extracted documents will be written to a `collection_id`-specific subdirectory beneath `docs_directory`. | No |
+
+
+#### Configuring the extract and build stages
+
+You can configure the number of workers and batch sizes for the extract and build stages of the `LexicalGraphIndex` using the `GraphRAGConfig` object. See [Configuration](/graphrag-toolkit/lexical-graph/configuration/) for more details on using the configuration object.
+
+Besides configuring the workers and batch sizes, you can also configure the indexing process with regard to chunking, proposition extraction and entity classification, and graph and vector store contents by passing an instance of `IndexingConfig` to the `LexicalGraphIndex` constructor:
+
+```python
+from graphrag_toolkit.lexical_graph import LexicalGraphIndex, IndexingConfig, ExtractionConfig
+
+...
+
+graph_index = LexicalGraphIndex(
+    graph_store,
+    vector_store,
+    indexing_config = IndexingConfig(
+        chunking=None,
+        extraction=ExtractionConfig(
+            enable_proposition_extraction=False
+        )
+    )
+)
+```
+
+The `IndexingConfig` object has the following parameters:
+
+| Parameter | Description | Default Value |
+| ------------- | ------------- | ------------- |
+| `chunking` | A list of node parsers (e.g. LlamaIndex `SentenceSplitter`) to be used for chunking source documents. Set `chunking` to `None` to skip chunking. | `SentenceSplitter` with `chunk_size=256` and `chunk_overlap=25` |
+| `extraction` | An `ExtractionConfig` object specifying extraction options | `ExtractionConfig` with default values |
+| `build` | A `BuildConfig` object specifying build options | `BuildConfig` with default values |
+| `batch_config` | Batch configuration to be used if performing [batch extraction](/graphrag-toolkit/lexical-graph/batch-extraction/). If `batch_config` is `None`, the toolkit will perform chunk-by-chunk extraction. | `None` |
+
+The `ExtractionConfig` object has the following parameters:
+
+| Parameter | Description | Default Value |
+| ------------- | ------------- | ------------- |
+| `enable_proposition_extraction` | Perform proposition extraction before extracting topics, statements, facts and entities | `True` |
+| `preferred_entity_classifications` | Comma-separated list of preferred entity classifications used to seed the entity extraction | `DEFAULT_ENTITY_CLASSIFICATIONS` |
+| `preferred_topics` | List of preferred topic names (or a callable that returns them) supplied to the LLM to seed topic extraction. Accepts the same type as `preferred_entity_classifications`.
| `[]` | +| `infer_entity_classifications` | Determines whether to pre-process documents to identify significant domain entity classifications. Supply either `True` or `False`, or an `InferClassificationsConfig` object. When `True`, an `InferClassifications` step runs as a **pre-processor** before the main extraction loop — one extra LLM round-trip per batch, not per document. | `False` | +| `extract_propositions_prompt_template` | Prompt used to extract propositions from chunks. If `None`, the [default extract propositions template](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L29-L72) is used. See [Custom prompts](#custom-prompts) below. | `None` | +| `extract_topics_prompt_template` | Prompt used to extract topics, statements and entities from chunks. If `None`, the [default extract topics template](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L74-L191) is used. See [Custom prompts](#custom-prompts) below. | `None` | +| `extraction_llm` | LLM used to perform extraction and infer classifications. Accepts the model id of an Amazon Bedrock model, an Amazon Bedrock [inference profile](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles.html), a JSON string representation of a LlamaIndex `BedrockConverse` instance, or an instance of a LlamaIndex `LLM` object (see the [LLM configuration](/graphrag-toolkit/lexical-graph/configuration/#llm-configuration) section for more details). If `None`, the [`GraphRAG.extraction_llm`](/graphrag-toolkit/lexical-graph/configuration/#graphragconfig) configuration parameter is used. | `None` | + + +The `BuildConfig` object has the following parameters: + +| Parameter | Description | Default Value | +| ------------- | ------------- | ------------- | +| `build_filters` | A `BuildFilters` object to include or exclude specific node types during the build stage | `BuildFilters()` | +| `include_domain_labels` | Whether to add a domain-specific label (e.g. `Company`) to entity nodes in addition to `__Entity__` | `None` (falls back to `GraphRAGConfig.include_domain_labels`) | +| `include_local_entities` | Whether to include local-context entities in the graph | `None` (falls back to `GraphRAGConfig.include_local_entities`) | +| `source_metadata_formatter` | A `SourceMetadataFormatter` instance for customising source metadata written to the graph | `DefaultSourceMetadataFormatter()` | +| `enable_versioning` | Whether to enable versioned updates. Overrides `GraphRAGConfig.enable_versioning` when set. | `None` | + +The `InferClassificationsConfig` object has the following parameters: + +| Parameter | Description | Default Value | +| ------------- | ------------- | ------------- | +| `num_iterations` | Number of times to run the pre-processing over the source documents | 1 | +| `num_samples` | Number of chunks (selected at random) from which classifications are extracted per iteration | 5 | +| `prompt_template` | Prompt used to extract classifications from sampled chunks. If `None`, the [default domain entity classifications template](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L4-L27) is used. See [Custom prompts](#custom-prompts) below. 
| `None` |
+
+
+#### Custom prompts
+
+The extract stage uses up to three LLM prompts:
+
+ - [**Domain entity classifications:**](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L4-L27) Extracts significant domain entity classifications from a sample of source documents prior to processing the documents. These classifications are then supplied to the extract topics prompt as the list of preferred entity classifications.
+ - [**Extract propositions:**](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L29-L72) Extracts a set of standalone, well-formed propositions from a chunk.
+ - [**Extract topics:**](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/prompts.py#L74-L191) Extracts topics, statements and entities and their relations from either a set of propositions, or from the raw chunk text.
+
+Using the `ExtractionConfig` and `InferClassificationsConfig` you can customize one or more of these prompts.
+
+**Domain entity classifications:**
+
+The prompt template should include a `{text_chunks}` placeholder, into which the sampled chunks will be inserted.
+
+The template should return classifications in the following format:
+
+```
+
+Classification1
+Classification2
+Classification3
+
+```
+
+**Extract propositions:**
+
+The prompt template should include a `{text}` placeholder, into which the chunk text will be inserted.
+
+The template should return propositions in the following format:
+
+```
+proposition
+proposition
+proposition
+```
+
+**Extract topics:**
+
+The prompt template should include a `{text}` placeholder, into which a set of propositions (or the raw chunk text) will be inserted, a `{preferred_topics}` placeholder, into which a list of topics will be inserted, and a `{preferred_entity_classifications}` placeholder, into which a list of entity classifications will be inserted.
+
+The template should return extracted topics, statements, entities and relations in the following format:
+
+```
+topic: topic
+
+  entities:
+  entity|classification
+  entity|classification
+
+  proposition: [exact proposition text]
+  entity-attribute relationships:
+  entity|RELATIONSHIP|attribute
+  entity|RELATIONSHIP|attribute
+
+  entity-entity relationships:
+  entity|RELATIONSHIP|entity
+  entity|RELATIONSHIP|entity
+
+  proposition: [exact proposition text]
+  entity-attribute relationships:
+  entity|RELATIONSHIP|attribute
+  entity|RELATIONSHIP|attribute
+
+  entity-entity relationships:
+  entity|RELATIONSHIP|entity
+  entity|RELATIONSHIP|entity
+```
+
+
+#### Batch extraction
+
+You can use [Amazon Bedrock batch inference](https://docs.aws.amazon.com/bedrock/latest/userguide/batch-inference.html) with the extract stage of the indexing process. See [Batch Extraction](/graphrag-toolkit/lexical-graph/batch-extraction/) for more details.
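+
+As a brief sketch of how this fits together, batch extraction is enabled by passing a `BatchConfig` to the `IndexingConfig`. The import path is inferred from the file location linked below, and the role, region and bucket values are placeholders:
+
+```python
+from graphrag_toolkit.lexical_graph import LexicalGraphIndex, IndexingConfig
+from graphrag_toolkit.lexical_graph.indexing.extract import BatchConfig
+
+graph_index = LexicalGraphIndex(
+    graph_store,
+    vector_store,
+    indexing_config=IndexingConfig(
+        batch_config=BatchConfig(
+            role_arn='arn:aws:iam::123456789012:role/bedrock-batch-role',  # placeholder
+            region='us-east-1',
+            bucket_name='my-batch-inference-bucket'  # placeholder
+        )
+    )
+)
+```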
+ +`BatchConfig` ([`indexing/extract/batch_config.py`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/extract/batch_config.py)) accepts the following parameters: + +| Parameter | Description | Required | +| ------------- | ------------- | ------------- | +| `role_arn` | ARN of the IAM role Bedrock will assume to run batch jobs | Yes | +| `region` | AWS region where batch jobs will run | Yes | +| `bucket_name` | S3 bucket for batch job input/output | Yes | +| `key_prefix` | S3 key prefix for job files | No | +| `s3_encryption_key_id` | KMS key ID for S3 object encryption | No | +| `subnet_ids` | VPC subnet IDs for the batch job network configuration | No | +| `security_group_ids` | VPC security group IDs | No | +| `max_batch_size` | Maximum records per batch job (Bedrock limit: 50,000; jobs under 100 records are skipped and processed inline) | `25000` | +| `max_num_concurrent_batches` | Maximum concurrent batch jobs per worker | `3` | +| `delete_on_success` | Whether to delete S3 job files after a successful run | `True` | + +#### Metadata filtering + +You can add metadata to source documents on ingest, and then use this metadata to filter documents during the extract and build stages. Source metadata is also used for metadata filtering when querying a lexical graph. See the [Metadata Filtering](/graphrag-toolkit/lexical-graph/metadata-filtering/) section for more details. + +#### Versioned updates + +The lexical graphs supports [versioned updates](/graphrag-toolkit/lexical-graph/versioned-updates/). With versioned updates, if you re-ingest a document whose contents and/or metadata have changed since it was last extracted, any old documents will be archived, and the newly ingested document treated as the current version of the source document. + +#### Checkpoints + +The lexical-graph retries upsert operations and calls to LLMs and embedding models that don't succeed. However, failures can still happen. If an extract or build stage fails partway through, you typically don't want to reprocess chunks that have successfully made their way through the entire graph construction pipeline. + +To avoid having to reprocess chunks that have been successfully processed in a previous run, provide a `Checkpoint` instance to the `extract_and_build()`, `extract()` and/or `build()` methods. A checkpoint adds a checkpoint *filter* to steps in the extract and build stages, and a checkpoint *writer* to the end of the build stage. When a chunk is emitted from the build stage, after having been successfully handled by both the graph construction *and* vector indexing handlers, its id will be written to a save point in the graph index `extraction_dir`. If a chunk with the same id is subsequently introduced into either the extract or build stage, it will be filtered out by the checkpoint filter. + +The following example passes a checkpoint to the `extract_and_build()` method: + +```python +from graphrag_toolkit.lexical_graph.indexing.build import Checkpoint + +checkpoint = Checkpoint('my-checkpoint') + +... + +graph_index.extract_and_build(docs, checkpoint=checkpoint) +``` + +When you create a `Checkpoint`, you must give it a name. A checkpoint filter will only filter out chunks that were checkpointed by a checkpoint writer with the same name. If you use checkpoints when [running separate extract and build processes](#run-the-extract-and-build-stages-separately), ensure the checkpoints have different names. 
If you use the same name across separate extract and build processes, the build stage will ignore all the chunks created by the extract stage. + +Checkpoints do not provide any transactional guarantees. If a chunk is successfully processed by the graph construction handlers, but then fails in a vector indexing handler, it will not make it to the end of the build pipeline, and so will not be checkpointed. If the build stage is restarted, the chunk will be reprocessed by both the graph construction and vector indexing handlers. For stores that support upserts (e.g. Amazon Neptune Database and Amazon Neptune Analytics) this is not an issue. + +The lexical-graph does not clean up checkpoints. If you use checkpoints, periodically clean the checkpoint directory of old checkpoint files. + diff --git a/docs-site/src/content/docs/lexical-graph/metadata-filtering.mdx b/docs-site/src/content/docs/lexical-graph/metadata-filtering.mdx new file mode 100644 index 00000000..ff28afd6 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/metadata-filtering.mdx @@ -0,0 +1,352 @@ +--- +title: Metadata Filtering +--- + +### Topics + + - [Overview](#overview) + - [Adding metadata when indexing](#adding-metadata-when-indexing) + - [Metadata and versioned updates](#metadata-and-versioned-uupdates) + - [Adding metadata to web pages](#adding-metadata-to-web-pages) + - [Adding metadata to JSON documents](#adding-metadata-to-json-documents) + - [Adding metadata to PDF documents](#adding-metadata-to-pdf-documents) + - [Restrictions](#restrictions) + - [Using metadata to filter queries](#using-metadata-to-filter-queries) + - [How are metadata filters applied?](#how-are-metadata-filters-applied) + - [Complex and nested filter expressions](#complex-and-nested-filter-expressions) + - [Supported filter operators](#supported-filter-operators) + - [Dates and datetimes](#dates-and-datetimes) + - [Using metadata to filter documents in the extract and build stages](#using-metadata-to-filter-documents-in-the-extract-and-build-stages) + - [Using metadata filtering in the extract stage](#using-metadata-filtering-in-the-extract-stage) + - [Using metadata filtering in the build stage](#using-metadata-filtering-in-the-build-stage) + - [Metadata and document identity](#metadata-and-document-identity) + - [Metadata filtering and multi-tenancy](#metadata-filtering-and-multi-tenancy) + + +### Overview + +Metadata filtering allows you to retrieve a constrained set of sources, topics and statements based on metadata filters and associated values when querying a lexical graph. + +Metadata is any data added to the metadata dictionary of a source document. Depending on the source document, examples of metadata may include `title`, `url`, `filepath`, `date published`, and `author`. A source document's metadata is then associated with any chunks, topics and statements extracted from that document. + +There are two parts to metadata filtering: + + - **Indexing** Add metadata to source documents passed to the indexing process + - **Querying** Supply metadata filters when querying a lexical graph + +You can also use metadata filtering to [filter documents and chunks during the extract and build stages](#using-metadata-to-filter-documents-in-the-extract-and-build-stages) of the indexing process. + +### Adding metadata when indexing + +The effectiveness of metadata filtering during querying is dependent on the quality of the metadata attached to source documents during ingestion. 
[Different loaders](https://docs.llamaindex.ai/en/stable/understanding/loading/loading/) have different mechanisms for adding metadata to ingested documents. Here are some examples.
+
+#### Metadata and versioned updates
+
+The lexical graph supports [versioned updates](/graphrag-toolkit/lexical-graph/versioned-updates/). With versioned updates, if you re-ingest a document whose contents and/or metadata have changed since it was last extracted, any old documents will be archived, and the newly ingested document treated as the current version of the source document.
+
+Versioned updates use a concept of _version-independent metadata fields_ to represent a document's stable (i.e. version-independent) identity. When you index a document, you can specify which of that document's metadata fields represent its stable identity. For example, if a document has `title`, `author` and `last_updated` metadata fields, you might specify that a combination of the `title` and `author` metadata fields represents that document's stable identity. When the document is indexed, any previously indexed, non-versioned documents whose `title` and `author` field _values_ match those of the newly ingested document will be archived.
+
+When choosing which metadata to add to each source document that you ingest, bear in mind this use of metadata for versioned updates. Try to ensure that one of the fields, or a combination of multiple field values, constitutes a stable identity.
+
+
+#### Adding metadata to web pages
+
+The LlamaIndex `SimpleWebPageReader` accepts a function that takes a url and returns a metadata dictionary. The following example populates the metadata dictionary with the url and the date on which the page was accessed.
+
+```python
+from datetime import date
+from llama_index.readers.web import SimpleWebPageReader
+
+doc_urls = [
+    'https://docs.aws.amazon.com/neptune/latest/userguide/intro.html',
+    'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html',
+    'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html',
+    'https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-vs-neptune-database.html'
+]
+
+def web_page_metadata(url):
+    return {
+        'url': url,
+        'last_accessed_date': date.today()
+    }
+
+docs = SimpleWebPageReader(
+    html_to_text=True,
+    metadata_fn=web_page_metadata
+).load_data(doc_urls)
+```
+
+#### Adding metadata to JSON documents
+
+The `JSONArrayReader` allows you to split a JSON array document into separate documents, one per element in the array, and extract metadata from each sub-document. The following example splits a JSON source document containing news articles into separate documents, one per article. The `get_text()` and `get_metadata()` functions extract each article's body text and associated metadata.
+
+
+```python
+from graphrag_toolkit.lexical_graph.indexing.load import JSONArrayReader
+
+def get_text(data):
+    return data.get('body', '')
+
+def get_metadata(data):
+    return {
+        field : data[field]
+        for field in ['title', 'author', 'source', 'published_date']
+        if field in data
+    }
+
+docs = JSONArrayReader(
+    text_fn=get_text,
+    metadata_fn=get_metadata
+).load_data('./articles.json')
+```
+
+#### Adding metadata to PDF documents
+
+The following example shows one way of loading PDF documents and attaching metadata to each document.
+ +```python +from pathlib import Path +from pypdf import PdfReader +from llama_index.core.schema import Document + +def get_pdf_docs(pdf_dir): + + pdf_dir_path = Path(pdf_dir) + + file_paths = [ + file_path for file_path in pdf_dir_path.iterdir() + if file_path.is_file() + ] + + for pdf_path in file_paths: + reader = PdfReader(pdf_path) + for page_num, page_content in enumerate(reader.pages): + doc = Document( + text=page_content.extract_text(), + metadata={ + 'filename': pdf_path.name, + 'page_num': page_num + } + ) + yield doc + +docs = get_pdf_docs('./pdfs') +``` + +#### Restrictions + +Metadata field values may comprise string, int, float, [date and datetime](#dates-and-datetimes) single values. Lists, arrays, sets and nested dictionaries are not supported. + +### Using metadata to filter queries + +The lexical graph uses the LlamaIndex vector store types `MetadataFilters`, `MetadataFilter`, `FilterOperator`, and `FilterCondition` to specify filter criteria. You supply these to a query engine in a `FilterConfig` object. The following example configures a traversal-based retriever to filter the lexical graph based on the url of source documents: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.metadata import FilterConfig +from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter + +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + filter_config = FilterConfig( + MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + operator=FilterOperator.EQ + ) + ) +) +``` + +#### How are metadata filters applied? + +Metadata filters that you supply to a query engine are applied at two points in the retrieval process: + + - The filters are applied to all vector store top-k queries. The vector store is typically used to find starting points for graph traversals: filters therefore effectively constrain a retriever's entry points into the graph. + - The filters are subsequently applied to all the results returned from the graph. + +By its very nature, a graph can often connect disparate sources: traversals can hop from topics and statements belonging to one source, to topics and statements associated with an entirely different source. It's not sufficient, therefore, to simply limit the starting points for a traversal; the retriever must also filter the results. The benefit of the dual application of a metadata filter is that it restricts the semantic similarity-based lookups that provide the start points of a query to a well-defined set of sources, but then allows the query to access structurally relevant but semantically dissimilar parts of the lexical graph, some of which may be allowed by the filter, some disallowed, before finally constraining the results to only those elements that pass the filter criteria. + +#### Complex and nested filter expressions + +The constructor of the `FilterConfig` object accepts either a `MetadataFilters` object, a single `MetadataFilter` or a list of `MetadataFilter` objects. + +A `MetadataFilters` object can hold a collection of `MetadataFilter` objects as well as other, nested `MetadataFilters` objects. Elements in a `MetadataFilters` object's `filters` collection are chained to form complex conditions using either a `FilterCondition.AND` or `FilterCondition.OR` condition. + +`MetadataFilters` also supports a third condition: `FilterCondition.NOT`. 
If you use the `FilterCondition.NOT` condition with a `MetadataFilters` object, the `filters` collection of that object must contain a single nested `MetadataFilters` object. + +The following example shows the use of a nested `MetadataFilters` object to express a complex condition: either the source must be from `https://docs.aws.amazon.com/neptune/latest/userguide/intro.html`, OR its publication date must fall between `2024-01-01` and `2024-12-31`: + +```python +FilterConfig( + MetadataFilters( + filters=[ + MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + operator=FilterOperator.EQ + ), + MetadataFilters( + filters=[ + MetadataFilter( + key='pub_date', + value='2024-01-01', + operator=FilterOperator.GT + ), + MetadataFilter( + key='pub_date', + value='2024-12-31', + operator=FilterOperator.LT + ) + ], + condition=FilterCondition.AND + ) + ], + condition=FilterCondition.OR + ) +) +``` + +The following example shows the use of a nested `MetadataFilters` object with a `FilterCondition.NOT` condition. Even though there is only one `MetadataFilter` that is being negated here, it must be nested inside a `MetadataFilters` object. + +```python +FilterConfig( + MetadataFilters( + filters=[ + MetadataFilters( + filters=[ + MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + operator=FilterOperator.EQ + ) + ] + ) + ], + condition=FilterCondition.NOT + ) +) +``` + +#### Supported filter operators + +The lexical graph supports the following filter operators: + +| Operator | Description | Data Types | +| ------------- | ------------- | ------------- | +| `EQ` | Equals – default operator | string, int, float, date/datetime | +| `GT` | Greater than | int, float, date/datetime | +| `LT` | Less than | int, float, date/datetime | +| `NE` | Not equal to | string, int, float, date/datetime | +| `GTE` | Greater than or equal to | int, float, date/datetime | +| `LTE` | Less than or equal to | int, float, date/datetime | +| `TEXT_MATCH` | Full text match (allows you to search for a specific substring, token or phrase within the text field) | string | +| `TEXT_MATCH_INSENSITIVE` | Full text match (case insensitive) | string | +| `IS_EMPTY` | The field does not exist || + +The following operators are not supported: + +| Operator | Description | Data Types | +| ------------- | ------------- | ------------- | +| `IN` | In array | string or number | +| `NIN` | Not in array | string or number | +| `ANY` | Contains any | array of strings | +| `ALL` | Contains all | array of strings | +| `CONTAINS` | Metadata array contains value (string or number) | | + +### Dates and datetimes + +Matadata filtering supports filtering by date and datetime values. There are two ways in which you can ensure datetime filtering is applied during indexing and querying: + + - Supply Python `date` or `datetime` objects in the metadata fields attached to source documents, and in the metadata filters applied when querying. + - Indicate that a field is to be treated as a datetime value by suffixing the field name with `_date` or `_datetime`. You can then supply either `date` or `datetime` objects, or string representations of dates and datetime values, when indexing and querying. + +In the build stage, Python `date` and `datetime` metadata values are converted to ISO-formatted datetime values before being persisted to the graph and vector stores. 
During querying, Python `date` and `datetime` metadata values are similarly converted to ISO-formatted datetime values before being applied in a filter. `date` and `datetime` Python objects explicitly communicate that a value should be treated as a date or datetime. With this approach, you do not need to add a `_date` or `_datetime` suffix to a metadata field name. However, you must ensure that `date` and/or `datetime` objects are used both during indexing and querying: if one or other of these stages receives a string representation of a date or datetime, filtering may not work as intended.
+
+Metadata fields that end with `_date` or `_datetime` are converted to ISO-formatted datetime values before being persisted to the graph and vector stores. Similarly, the values of metadata filters whose keys end with `_date` or `_datetime` are converted to ISO-formatted datetime values before being evaluated.
+
+### Using metadata to filter documents in the extract and build stages
+
+Besides using metadata filtering to constrain the retrieval process, you can also use it to filter documents during the extract and build stages of the indexing process.
+
+#### Using metadata filtering in the extract stage
+
+You can filter the documents that pass through the extract stage by supplying filter criteria to the `extraction_filters` parameter of an `ExtractionConfig` object. `extraction_filters` accepts either a `MetadataFilters` object, a single `MetadataFilter` or a list of `MetadataFilter` objects.
+
+The following example shows how to filter source documents so that only documents with an `email` metadata field containing an `amazon.com` email address proceed through the extraction pipeline. All other source documents will be discarded.
+
+```python
+from graphrag_toolkit.lexical_graph import LexicalGraphIndex, ExtractionConfig
+from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter
+
+graph_index = LexicalGraphIndex(
+    graph_store,
+    vector_store,
+    indexing_config=ExtractionConfig(
+        extraction_filters=MetadataFilter(
+            key='email',
+            value='amazon.com',
+            operator=FilterOperator.TEXT_MATCH
+        )
+    )
+)
+```
+
+Use extraction stage metadata filtering if you only want to extract a lexical graph from a subset of documents, but can't control which documents are submitted to the ingestion process.
+
+#### Using metadata filtering in the build stage
+
+You can filter the documents that are used to build a lexical graph by supplying a `BuildConfig` object containing a `BuildFilters` object whose `source_filters` property specifies the filter criteria. `source_filters` accepts either a `MetadataFilters` object, a single `MetadataFilter` or a list of `MetadataFilter` objects.
+
+The following example shows how to filter extracted documents so that only documents whose `url` metadata field contains `https://docs.aws.amazon.com/neptune/` will proceed through the build pipeline. All other extracted documents will be ignored. The resulting lexical graph is assigned to the `neptune` tenant.
+ +```python +from graphrag_toolkit.lexical_graph import LexicalGraphIndex, BuildConfig +from graphrag_toolkit.lexical_graph.indexing.build import BuildFilters +from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter + +graph_index = LexicalGraphIndex( + graph_store, + vector_store, + indexing_config=BuildConfig( + build_filters=BuildFilters( + source_filters=MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/', + operator=FilterOperator.TEXT_MATCH + ) + ) + ), + tenant_id='neptune' +) +``` + +Build-stage metadata filtering works well in an extract-once, build-many-times workload. You can extract the entire corpus to an `S3BasedDocs` sink or `FileBasedDocs` sink (see [Run the extract and build stages separately](/graphrag-toolkit/lexical-graph/indexing/#run-the-extract-and-build-stages-separately)), and then build multiple lexical graphs from the extracted documents. Using different sets of filtering criteria and the [multi-tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/) feature, you can build multiple, discrete lexical graphs with different contents from the same underlying sources. + +### Metadata and document identity + +The metadata associated with a source document comprises part of that document's identity. A source document's id is a function of the contents of the document and the metadata. Chunk, topic and statement ids are in turn a function of the source id. If you change a source document's metadata (adding or removing fields, or changing field values), and reprocess the document, it will be indexed into new source, chunk, topic and statement nodes in the lexical graph. + +### Metadata filtering and multi-tenancy + +Metadata filtering constrains retrieval to one or more subgraphs within a particular lexical graph. [Multi tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/) creates wholly separate lexical graphs within the same underlying graph and vector stores. Metadata filtering and multi-tenancy work well together. As [described above](#using-metadata-filtering-in-the-build-stage), you can use metadata filtering to build different tenant graphs from the same extracted corpus. You can also use metadata filtering and multi tenancy when querying. 
The following example applies metadata filtering to a query in the context of the `neptune` tenant's lexical graph: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.metadata import FilterConfig +from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter + +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + filter_config = FilterConfig( + MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + operator=FilterOperator.EQ + ) + ), + tenant_id='neptune' +) +``` diff --git a/docs-site/src/content/docs/lexical-graph/multi-tenancy.mdx b/docs-site/src/content/docs/lexical-graph/multi-tenancy.mdx new file mode 100644 index 00000000..7e25fdd5 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/multi-tenancy.mdx @@ -0,0 +1,63 @@ +--- +title: Multi-Tenancy +--- + +### Topics + +- [Overview](#overview) +- [Tenant Id](#tenant-id) +- [Indexing and multi-tenancy](#indexing-and-multi-tenancy) +- [Querying and multi-tenancy](#querying-and-multi-tenancy) +- [Implementation details](#implementation-details) + +### Overview + +Multi-tenancy allows you to host multiple separate lexical graphs in the same underlying graph and vector stores. + +### Tenant Id + +To use the multi-tenancy feature, supply a tenant id when creating a `LexicalGraphIndex` or `LexicalGraphQueryEngine`. A tenant id is a string of 1–25 lowercase letters, numbers, and periods (periods cannot appear at the start or end). If you don't supply a tenant id, the index and query engine use the _default tenant_ (a tenant id value of `None`). + +See [`tenant_id.py`](../../lexical-graph/src/graphrag_toolkit/lexical_graph/tenant_id.py) for the validation logic. + +### Indexing and multi-tenancy + +The following example creates a `LexicalGraphIndex` for tenant `user123`: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphIndex + +graph_store = ... +vector_store = ... + +graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='user123' +) +``` + +**Important:** the extract stage always writes under the _default_ tenant, regardless of the tenant id you set. This is intentional — it lets you extract once and build for multiple tenants from the same extracted output. Only the build stage applies the tenant id. A warning is logged when a non-default tenant id is set ([`lexical_graph_index.py:445`](../../lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_index.py#L445)). + +### Querying and multi-tenancy + +The following example creates a `LexicalGraphQueryEngine` for tenant `user123`: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine + +graph_store = ... +vector_store = ... + +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + tenant_id='user123' +) +``` + +If a lexical graph does not exist for the specified tenant id, the retrievers return an empty result set. + +### Implementation details + +Multi-tenancy works by using tenant-specific node labels and index names. For example, chunk nodes for tenant `user123` are labelled `__Chunk__user123__`, and the chunk vector index is named `chunk_user123`. 
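+
+As a closing sketch of the extract-once, build-per-tenant pattern noted above (the extract stage always runs under the default tenant), the same extracted collection can be built into separate tenant graphs. The example uses the `S3BasedDocs` sink described in [Indexing](/graphrag-toolkit/lexical-graph/indexing/#run-the-extract-and-build-stages-separately); the bucket and collection values are placeholders, and `graph_store` and `vector_store` are assumed to have been created already:
+
+```python
+from graphrag_toolkit.lexical_graph import LexicalGraphIndex
+from graphrag_toolkit.lexical_graph.indexing.load import S3BasedDocs
+
+extracted_docs = S3BasedDocs(
+    region='us-east-1',
+    bucket_name='my-bucket',   # placeholder
+    key_prefix='extracted',
+    collection_id='12345'      # placeholder
+)
+
+for tenant_id in ['user123', 'user456']:
+    graph_index = LexicalGraphIndex(
+        graph_store,
+        vector_store,
+        tenant_id=tenant_id
+    )
+    graph_index.build(extracted_docs)
+```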
diff --git a/docs-site/src/content/docs/lexical-graph/nova-2-model-support.mdx b/docs-site/src/content/docs/lexical-graph/nova-2-model-support.mdx new file mode 100644 index 00000000..95ca8d8c --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/nova-2-model-support.mdx @@ -0,0 +1,401 @@ +--- +title: Nova 2 Model Support +--- + +## Overview + +This document explains how the lexical-graph toolkit supports Amazon Nova 2 series models in AWS Bedrock, including the architecture, implementation details, and usage patterns. + +## Background + +### The Problem + +Amazon Nova 2 series models (Lite, Micro, Pro, Premier, Pro Preview) were released after LlamaIndex's `BedrockConverse` class was implemented. LlamaIndex maintains a hardcoded list of supported models in `llama_index/llms/bedrock_converse/utils.py`, and Nova 2 models are not included in this list. This causes model validation to fail when attempting to use Nova 2 models. + +Additionally, Nova 2 models require using inference profile format (e.g., `us.amazon.nova-2-lite-v1:0`) instead of direct model IDs for on-demand throughput, which adds another layer of complexity. + +### The Solution + +Rather than waiting for LlamaIndex to update their model list or monkey-patching their validation logic, we implemented a custom `DirectBedrockLLM` class that: + +1. Uses boto3's `bedrock-runtime` client directly, bypassing LlamaIndex's model validation +2. Implements LlamaIndex's `LLM` interface for compatibility with existing code +3. Properly handles credential management through `GraphRAGConfig.session` +4. Supports pickling for multiprocessing workflows + +## Architecture + +### Component Overview + +``` +GraphRAGConfig + ├── _to_llm() method + │ ├── Checks if model is in NOVA_2_MODELS list + │ ├── If yes → DirectBedrockLLM + │ └── If no → BedrockConverse (LlamaIndex) + │ + └── session property + └── Provides boto3 session for AWS authentication + ├── IRSA in EKS (IAM Roles for Service Accounts) + └── SSO locally (AWS profiles) + +DirectBedrockLLM + ├── Implements LLM interface + ├── Uses boto3 bedrock-runtime client + ├── Gets credentials from GraphRAGConfig.session + └── Supports pickling via __getstate__/__setstate__ +``` + +### Decision Logic + +The `_to_llm()` method in `GraphRAGConfig` determines which LLM implementation to use: + +**DirectBedrockLLM is used when:** +- Model ID is in the `NOVA_2_MODELS` list +- Includes both model ID format (`amazon.nova-2-*`) and inference profile format (`us.amazon.nova-2-*`) + +**BedrockConverse (LlamaIndex) is used for:** +- All other Bedrock models (Claude, Titan, Cohere, etc.) +- Any model NOT in the `NOVA_2_MODELS` list + +## Implementation Details + +### Supported Nova 2 Models + +The following Nova 2 models are supported (defined in `config.py`): + +```python +NOVA_2_MODELS = [ + # Model IDs + 'amazon.nova-2-lite-v1:0', + 'amazon.nova-2-micro-v1:0', + 'amazon.nova-2-pro-v1:0', + 'amazon.nova-2-premier-v1:0', + 'amazon.nova-2-pro-preview-20251202-v1:0', + # Inference profile formats (required for on-demand throughput) + 'us.amazon.nova-2-lite-v1:0', + 'us.amazon.nova-2-micro-v1:0', + 'us.amazon.nova-2-pro-v1:0', + 'us.amazon.nova-2-premier-v1:0', + 'us.amazon.nova-2-pro-preview-20251202-v1:0', +] +``` + +### DirectBedrockLLM Class + +Located in `lexical-graph/src/graphrag_toolkit/lexical_graph/bedrock_llm.py`: + +**Key Features:** + +1. **LlamaIndex Compatibility**: Implements the `LLM` interface from LlamaIndex +2. 
**Credential Management**: Gets boto3 session from `GraphRAGConfig.session` +3. **Pickling Support**: Excludes client from pickle, recreates on unpickle +4. **Lazy Client Creation**: Client property creates client on-demand from session + +**Pickling Implementation:** + +```python +def __getstate__(self): + """Exclude client from pickle - will be recreated from GraphRAGConfig.session""" + state = self.__dict__.copy() + state['_client'] = None + return state + +def __setstate__(self, state): + """Restore state and recreate client from GraphRAGConfig.session""" + self.__dict__.update(state) + self._client = None # Will be lazily created via property + +@property +def client(self): + """Lazy client creation from GraphRAGConfig.session""" + if self._client is None: + from graphrag_toolkit.lexical_graph.config import GraphRAGConfig + self._client = GraphRAGConfig.session.client('bedrock-runtime') + return self._client +``` + +This approach ensures: +- Client is not pickled (which would fail) +- Client is recreated with proper credentials after unpickling +- Works seamlessly in multiprocessing environments + +### Configuration Integration + +The `_to_llm()` method in `GraphRAGConfig` handles model selection: + +```python +def _to_llm(self, llm: LLMType): + if isinstance(llm, LLM): + return llm + + # ... session setup ... + + if _is_json_string(llm): + config = json.loads(llm) + model_id = config['model'] + + # Check if this is a Nova 2 model + if model_id in NOVA_2_MODELS: + from graphrag_toolkit.lexical_graph.bedrock_llm import DirectBedrockLLM + logger.info(f"Using DirectBedrockLLM for Nova 2 model: {model_id}") + return DirectBedrockLLM( + model=model_id, + temperature=config.get('temperature', 0.0), + max_tokens=config.get('max_tokens', 4096) + ) + + # Use BedrockConverse for other models + return BedrockConverse(...) + + else: + # Check if this is a Nova 2 model + if llm in NOVA_2_MODELS: + from graphrag_toolkit.lexical_graph.bedrock_llm import DirectBedrockLLM + logger.info(f"Using DirectBedrockLLM for Nova 2 model: {llm}") + return DirectBedrockLLM( + model=llm, + temperature=0.0, + max_tokens=4096 + ) + + # Use BedrockConverse for other models + return BedrockConverse(...) 
+``` + +## Usage + +### Explicit Import and Instantiation + +To use Nova 2 multimodal embeddings, you must explicitly import and instantiate the class: + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig +from graphrag_toolkit.lexical_graph.utils.bedrock_utils import Nova2MultimodalEmbedding + +GraphRAGConfig.embed_model = Nova2MultimodalEmbedding('amazon.nova-2-multimodal-embeddings-v1:0') +GraphRAGConfig.embed_dimensions = 3072 +``` + +### Advanced Configuration + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig +from graphrag_toolkit.lexical_graph.utils.bedrock_utils import Nova2MultimodalEmbedding + +embedding = Nova2MultimodalEmbedding( + model_name='amazon.nova-2-multimodal-embeddings-v1:0', + embed_dimensions=3072, + embed_purpose='TEXT_RETRIEVAL', + truncation_mode='END' +) + +GraphRAGConfig.embed_model = embedding +GraphRAGConfig.embed_dimensions = 3072 +``` + +## IAM Permissions + +### Cross-Region Bedrock Access + +Nova 2 models use inference profiles which require specific IAM permissions: + +```python +# In infrastructure/platform/stacks/argo_workflow_access_stack.py +iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + "bedrock:InvokeModel", + "bedrock:InvokeModelWithResponseStream" + ], + resources=[ + # Inference profiles (without account ID) + "arn:aws:bedrock:*::inference-profile/*", + # Inference profiles (with account ID) + f"arn:aws:bedrock:*:{account}:inference-profile/*", + # Specific inference profile + f"arn:aws:bedrock:us-east-1::inference-profile/us.amazon.nova-2-lite-v1:0", + # Foundation models + f"arn:aws:bedrock:*::foundation-model/*", + ] +) +``` + +### Why Both ARN Patterns? + +AWS Bedrock inference profiles can have ARNs with or without account IDs: +- `arn:aws:bedrock:*::inference-profile/*` - Cross-account inference profiles +- `arn:aws:bedrock:*:{account}:inference-profile/*` - Account-specific inference profiles + +Including both ensures compatibility with all inference profile types. + +## Credential Management + +### Local Development (SSO) + +```bash +# Login to AWS SSO +aws sso login --profile primary + +# Set profile +export AWS_PROFILE=primary +export AWS_REGION=us-east-1 + +# Run extraction +python extract_script.py +``` + +### EKS (IRSA) + +In EKS, the service account is annotated with an IAM role: + +```yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-workflows-server + namespace: argo-workflows + annotations: + eks.amazonaws.com/role-arn: arn:aws:iam::188967239867:role/ArgoWorkflowAccessRole +``` + +The `GraphRAGConfig.session` automatically uses IRSA credentials when running in EKS. + +## Validation + +### Successful Workflow Example + +```bash +# Submit test workflow +argo submit infrastructure/argo-workflows/templates/extract-bee-test-workflow.yaml \ + -n argo-workflows \ + --watch + +# Check logs +argo logs extract-bee-test-6z8nx -n argo-workflows + +# Output shows: +# [GraphRAGConfig] Using DirectBedrockLLM for Nova 2 model: us.amazon.nova-2-lite-v1:0 +# Successfully extracted 22 JSON files +``` + +### Verification Steps + +1. **Check model selection**: Look for log message indicating DirectBedrockLLM usage +2. **Verify output**: Check S3 for extracted JSON files +3. **Validate credentials**: Ensure no authentication errors in logs +4. 
**Test pickling**: Verify multiprocessing works without serialization errors + +## Comparison: Before vs After + +### Previous Implementation (Problematic) + +**Issues:** +- Client injection hacks in `llm_cache.py` +- Manual boto3 client creation bypassing proper credential management +- Monkey-patching to work around pickling issues +- Didn't respect IRSA/SSO authentication +- Fragile and hard to maintain + +### Current Implementation (Clean) + +**Benefits:** +- Clean separation of concerns +- Each LLM class manages its own client +- Proper credential management through `GraphRAGConfig.session` +- No hacks or workarounds +- Proper pickling support via `__getstate__`/`__setstate__` +- Works seamlessly with IRSA in EKS and SSO locally +- Extensible - easy to add more models or custom LLM implementations +- Maintainable architecture + +## Adding New Models + +To add support for new models that aren't in LlamaIndex's supported list: + +1. **Add to NOVA_2_MODELS list** (or create a new list): + +```python +# In config.py +NEW_MODELS = [ + 'amazon.new-model-v1:0', + 'us.amazon.new-model-v1:0', +] +``` + +2. **Update _to_llm() logic**: + +```python +if model_id in NOVA_2_MODELS or model_id in NEW_MODELS: + return DirectBedrockLLM(...) +``` + +3. **Update IAM permissions** if needed: + +```python +resources=[ + f"arn:aws:bedrock:*::inference-profile/us.amazon.new-model-v1:0", +] +``` + +## Troubleshooting + +### Model Not Found Error + +**Symptom**: `ValueError: Model 'amazon.nova-2-lite-v1:0' is not supported` + +**Solution**: Ensure model is in `NOVA_2_MODELS` list and you're using the inference profile format (`us.amazon.nova-2-lite-v1:0`) + +### Pickling Errors + +**Symptom**: `TypeError: cannot pickle 'botocore.client.BedrockRuntime' object` + +**Solution**: Verify `DirectBedrockLLM` is being used (check logs for "Using DirectBedrockLLM" message) + +### Authentication Errors + +**Symptom**: `UnauthorizedOperation` or `AccessDenied` + +**Solution**: +- Local: Run `aws sso login --profile primary` +- EKS: Verify IAM role has correct permissions and service account annotation + +### Cross-Region Access Denied + +**Symptom**: `AccessDenied` when using inference profiles + +**Solution**: Ensure IAM policy includes both ARN patterns: +- `arn:aws:bedrock:*::inference-profile/*` +- `arn:aws:bedrock:*:{account}:inference-profile/*` + +## Files Modified + +### Core Implementation +- `lexical-graph/src/graphrag_toolkit/lexical_graph/bedrock_llm.py` - NEW: DirectBedrockLLM class +- `lexical-graph/src/graphrag_toolkit/lexical_graph/config.py` - UPDATED: Model selection logic +- `lexical-graph/src/graphrag_toolkit/lexical_graph/__init__.py` - UPDATED: Export DirectBedrockLLM +- `lexical-graph/src/graphrag_toolkit/lexical_graph/utils/llm_cache.py` - FIXED: Removed client injection hack +- `lexical-graph/src/graphrag_toolkit/lexical_graph/utils/bedrock_patch.py` - DELETED: Obsolete monkey-patch approach + +### Infrastructure +- `infrastructure/platform/stacks/argo_workflow_access_stack.py` - UPDATED: IAM permissions +- `infrastructure/argo-workflows/templates/extract-bee-test-workflow.yaml` - UPDATED: Use Nova 2 model +- `infrastructure/post-deployment/scripts/images/refresh-lexical-graph-bee.sh` - Build script + +## References + +- [AWS Bedrock Inference Profiles](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles.html) +- [LlamaIndex Bedrock Integration](https://docs.llamaindex.ai/en/stable/examples/llm/bedrock/) +- [IRSA 
Documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) + +## Conclusion + +The Nova 2 model support implementation provides a clean, maintainable solution for using Amazon's latest models in the lexical-graph toolkit. By implementing a custom LLM class that bypasses LlamaIndex's model validation while maintaining compatibility with the LlamaIndex interface, we achieve: + +- Full support for Nova 2 series models +- Proper credential management (IRSA/SSO) +- Multiprocessing compatibility +- Clean architecture without hacks +- Easy extensibility for future models + +This approach is significantly better than the previous implementation and provides a solid foundation for supporting new Bedrock models as they are released. diff --git a/docs-site/src/content/docs/lexical-graph/overview.mdx b/docs-site/src/content/docs/lexical-graph/overview.mdx new file mode 100644 index 00000000..665668a7 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/overview.mdx @@ -0,0 +1,184 @@ +--- +title: Overview +--- + +import { Tabs, TabItem, Aside, Card, CardGrid } from '@astrojs/starlight/components'; + +The graphrag-toolkit [lexical-graph](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/) library provides a framework for automating the construction of a [hierarchical lexical graph](/graphrag-toolkit/lexical-graph/graph-model/) (a graph representing textual elements at several levels of granularity extracted from source documents) from unstructured data, and composing question-answering strategies that query this graph when answering user questions. + + + +## Install + + + + ```sh + pip install graphrag-lexical-graph + ``` + + + ```sh + uv add graphrag-lexical-graph + ``` + + + ```sh + poetry add graphrag-lexical-graph + ``` + + + ```sh + pip install "https://github.com/awslabs/graphrag-toolkit/archive/refs/tags/graphrag-lexical-graph/v3.18.2.zip#subdirectory=lexical-graph" + ``` + + + +## At a glance + + + + Source → chunk → topic → statement → fact → entity, all linked. Retrieval can hop between any of these levels. + + + Graph: Amazon Neptune (DB and Analytics), Neo4j, FalkorDB. Vectors: Neptune, OpenSearch, Postgres, S3 Vectors. + + + Extract and build run as separate micro-batched pipelines so ingest is continuous and resumable. + + + Traversal-based search combines vector similarity with graph traversal. Semantic-guided search is also available. + + + + - [Store and model providers](#stores-and-model-providers) + - [Indexing and querying](#indexing-and-querying) + - [Indexing](#indexing) + - [Querying](#querying) + - [Multi tenancy](#multi-tenancy) + - [Metadata filtering](#metadata-filtering) + - [Versioned updates](#versioned-updates) + - [Model Context Protocol server](#model-context-protocol-server) + - [Security](#security) + - [Hybrid deployment](#hybrid-deployment) + - [Getting started](#getting-started) + +### Stores and model providers + +The lexical-graph library depends on three backend systems: a [_graph store_](/graphrag-toolkit/lexical-graph/storage-model/#graph-store), a [_vector store_](/graphrag-toolkit/lexical-graph/storage-model/#vector-store), and a _foundation model provider_. The graph store allows an application to store and query a lexical graph that has been extracted from unstructured, text-based sources. The vector store contains one or more indexes with emebddings for some of the elements in the lexical graph. 
These embeddings are primarily used to find starting points in the graph when the library runs a graph query. The foundation model provider hosts the Large Language Models (LLMs) and embedding models used to extract and embed information.
+
+The library has built-in graph store support for [Amazon Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html), [Amazon Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html), and [Neo4j](https://neo4j.com/docs/), and built-in vector store support for Neptune Analytics, [Amazon OpenSearch Serverless](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless.html), [Amazon S3 Vectors](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors.html), and Postgres with the pgvector extension. It is configured to use Amazon Bedrock as its foundation model provider. Besides these defaults, the library can be extended to support other third-party backends.
+
+### Indexing and querying
+
+The lexical-graph library implements two high-level processes: [_indexing_](/graphrag-toolkit/lexical-graph/indexing/) and [_querying_](/graphrag-toolkit/lexical-graph/querying/). The indexing process ingests and extracts information from unstructured, text-based source documents and then builds a graph and accompanying vector indexes. The query process retrieves content from the graph and vector indexes, and then supplies this content as context to an LLM to answer a user question.
+
+#### Indexing
+
+The indexing process is further split into two pipeline stages: [_extract_](/graphrag-toolkit/lexical-graph/indexing/#extract) and [_build_](/graphrag-toolkit/lexical-graph/indexing/#build). The extract stage ingests data from unstructured sources, chunks the content, and then uses an LLM to extract sets of topics, statements, facts and entities from these chunks. The build stage uses the results of the extract stage to populate a graph and create and index embeddings for some of the content.
+
+Extraction uses two LLM calls per chunk. The first 'cleans up' the content by extracting sets of well-formed, self-contained propositions from the chunked text. The second call then extracts topics, statements, facts, and entities and their relations from these propositions. Proposition extraction is optional: the second LLM call can be performed against the raw content, but the quality of the extraction tends to improve if proposition extraction is performed first.
+
+The overall indexing process uses a micro-batching approach to progress data through the extract and build pipelines. This allows the host application to persist extracted information emitted by the extract pipeline, either to the filesystem or to Amazon S3, and/or inspect the contents, and if necessary filter and transform the extracted elements prior to consuming them in the build pipeline. Indexing can be run in a continuous-ingest fashion, or as separate extract and build steps. Both modes allow you to take advantage of Amazon Bedrock's batch inference capabilities to perform [batch extraction](/graphrag-toolkit/lexical-graph/batch-extraction/) over collections of documents.
+
+The following diagram shows a high-level view of the indexing process:
+
+![Indexing](https://github.com/awslabs/graphrag-toolkit/blob/main/images/extract-and-build.png)
+
+#### Querying
+
+[Querying](/graphrag-toolkit/lexical-graph/querying/) is a two-step process consisting of _retrieval_ and _generation_.
Retrieval queries the graph and vector stores to fetch content relevant to answering a user question. Generation then supplies this content as context to an LLM to generate a response. The lexical-graph query engine allows an application to apply the retrieve operation by itself, which simply returns the search results fetched from the graph, or run an end-to-end query, which retrieves search results and then generates a response.
+
+The lexical-graph uses a [traversal-based search](/graphrag-toolkit/lexical-graph/traversal-based-search/) strategy for retrieving thematically related information distributed across multiple documents.
+
+The following diagram shows a high-level view of the end-to-end query process:
+
+![Querying](https://github.com/awslabs/graphrag-toolkit/blob/main/images/question-answering.png)
+
+Query steps:
+
+ 1. The application submits a user question to the lexical graph query engine.
+ 2. The engine generates an embedding for the user question.
+ 3. This embedding is used to perform a topK vector similarity search against embedded content in the vector store.
+ 4. The results of the similarity search are used to anchor one or more graph queries that retrieve relevant content from the graph.
+ 5. The engine supplies this retrieved content together with the user question to an LLM, which generates a response.
+ 6. The query engine returns this response to the application.
+
+### Multi tenancy
+
+The lexical-graph library's [multi-tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/) feature allows an application to host multiple separate lexical graphs in the same underlying graph and vector stores. Tenant graphs may correspond to different domains, collections of documents, or individual users.
+
+### Metadata filtering
+
+The lexical-graph supports [metadata filtering](/graphrag-toolkit/lexical-graph/metadata-filtering/). Metadata filtering constrains the set of sources, topics and statements retrieved when querying a graph based on metadata filters and associated values.
+
+There are two parts to metadata filtering:
+
+ - **Indexing**: Add metadata to source documents passed to the indexing process
+ - **Querying**: Supply metadata filters when querying a lexical graph
+
+Metadata filtering can also be used to [filter documents and chunks during the extract and build stages](/graphrag-toolkit/lexical-graph/metadata-filtering/#using-metadata-to-filter-documents-in-the-extract-and-build-stages) of the indexing process.
+
+#### Versioned updates
+
+The lexical graph supports [versioned updates](/graphrag-toolkit/lexical-graph/versioned-updates/). With versioned updates, if you re-ingest a document whose contents and/or metadata have changed since it was last extracted, any old versions will be archived, and the newly ingested document treated as the current version of the source document. You can then query the current state of the graph and vector stores, or configure the query to retrieve documents that were current at a specific point in time.
+
+### Model Context Protocol server
+
+The [Model Context Protocol](https://modelcontextprotocol.io/introduction) (MCP) is an open protocol that standardizes how applications provide context to LLMs.
+
+The lexical-graph can create a 'catalog' of tools, one per tenant in a multi-tenant graph. Each tool is capable of answering domain-specific questions based on the data in its tenant graph. This catalog is advertised to clients via an MCP server.
Clients (typically agents and LLMs) can then browse the catalog and choose appropriate tools for addressing their information goals.
+
+Each tool in the catalog is accompanied by an auto-generated description that helps a client understand the domain, scope, potential uses and kinds of questions covered by the tool. The catalog also includes a 'search' tool, which, given the name of an entity or concept, recommends one or more domain tools with knowledge of the search term.
+
+### Security
+
+Implementers using the lexical-graph library are responsible for securing access to the data sources they wish to index, and for provisioning and securing the underlying AWS resources, such as Neptune and OpenSearch, used by the library. The documentation includes [guidance](/graphrag-toolkit/lexical-graph/security/) on using AWS Identity and Access Management (IAM) policies to control access to Amazon Neptune, Amazon OpenSearch Serverless, and Amazon Bedrock.
+
+Irrespective of the policies applied to the identity under which the lexical-graph application runs, the library always SigV4-signs requests to AWS resources. Connections always use TLS version 1.3.
+
+### Hybrid deployment
+
+The overview above assumes that all operations, indexing and querying, take place in a cloud environment. However, the separation between the extract and build stages of the indexing process allows for hybrid deployment options, whereby cost-effective local development is accomplished using containerized graph and vector stores, with high-throughput LLM inference via SageMaker and Bedrock. See the [Hybrid Deployment](/graphrag-toolkit/lexical-graph/hybrid-deployment/) documentation for more detail.
+
+### Getting started
+
+You can get up-and-running with a fresh AWS environment using one of the [quickstart AWS CloudFormation templates](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/) supplied with the repository. Each of the quickstart templates creates an Amazon SageMaker-hosted Jupyter notebook containing several [example notebooks](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/notebooks/) that show you how to use the library to index and query content.
+
+The resources deployed by the CloudFormation templates incur costs in your account. Remember to delete the stack when you've finished with it so that you don't incur any unnecessary charges.
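+
+If you prefer to launch a quickstart from code rather than the AWS console, a minimal boto3 sketch along the following lines should work. The stack name and template file are illustrative, and the capability flags assume the template creates IAM roles, which the quickstarts typically do; very large templates may need to be uploaded to S3 and passed via `TemplateURL` instead of `TemplateBody`.
+
+```python
+import boto3
+
+cloudformation = boto3.client('cloudformation', region_name='us-east-1')
+
+# Illustrative template file name - choose one of the templates listed below
+with open('graphrag-toolkit-neptune-db-opensearch-serverless.json') as f:
+    template_body = f.read()
+
+cloudformation.create_stack(
+    StackName='graphrag-quickstart',
+    TemplateBody=template_body,
+    Capabilities=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
+)
+
+# Block until the environment is ready, then open the SageMaker notebook the stack creates
+cloudformation.get_waiter('stack_create_complete').wait(StackName='graphrag-quickstart')
+```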
+ +Choose from the following templates: + + - [`graphrag-toolkit-neptune-analytics.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-analytics.json) creates the following lexical-graph environment: + - Amazon Neptune Analytics graph + - Amazon SageMaker notebook + - [`graphrag-toolkit-neptune-analytics-opensearch-serverless.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-analytics-opensearch-serverless.json) creates the following lexical-graph environment: + - Amazon Amazon Neptune Analytics graph + - Amazon OpenSearch Serverless collection with a public endpoint + - Amazon SageMaker notebook + - [`graphrag-toolkit-neptune-analytics-aurora-postgres.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-analytics-aurora-postgres.json) creates the following lexical-graph environment: + - Amazon VPC with three private subnets, one public subnet, and an internet gateway + - Amazon Neptune Analytics graph + - Amazon Aurora Postgres Database cluster with a single serverless instance + - Amazon SageMaker notebook + - [`graphrag-toolkit-neptune-analytics-s3-vectors.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-analytics-s3-vectors.json) creates the following lexical-graph environment: + - Amazon Neptune Analytics graph + - Amazon SageMaker notebook + - Amazon S3 Vectors bucket + - [`graphrag-toolkit-neptune-db-opensearch-serverless.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-db-opensearch-serverless.json) creates the following lexical-graph environment: + - Amazon VPC with three private subnets, one public subnet, and an internet gateway + - Amazon Neptune Database cluster with a single Neptune serverless instance + - Amazon OpenSearch Serverless collection with a public endpoint + - Amazon SageMaker notebook + - [`graphrag-toolkit-neptune-db-aurora-postgres.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-db-aurora-postgres.json) creates the following lexical-graph environment: + - Amazon VPC with three private subnets, one public subnet, and an internet gateway + - Amazon Neptune Database cluster with a single Neptune serverless instance + - Amazon Aurora Postgres Database cluster with a single serverless instance + - Amazon SageMaker notebook + - [`graphrag-toolkit-neptune-db-s3-vectors.json`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/cloudformation-templates/graphrag-toolkit-neptune-db-s3-vectors.json) creates the following lexical-graph environment: + - Amazon VPC with three private subnets, one public subnet, and an internet gateway + - Amazon Neptune Database cluster with a single Neptune serverless instance + - Amazon SageMaker notebook + - Amazon S3 Vectors bucket + diff --git a/docs-site/src/content/docs/lexical-graph/prompts.mdx b/docs-site/src/content/docs/lexical-graph/prompts.mdx new file mode 100644 index 00000000..4821b221 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/prompts.mdx @@ -0,0 +1,192 @@ +--- +title: Custom Prompts +--- + +The GraphRAG Toolkit supports pluggable prompt providers to allow dynamic loading of prompt 
templates from various sources. All providers support AWS template integration for structured outputs and seamlessly handle document-graph query results through the `{query}` variable. + +### AWS Template Support + +All prompt providers support automatic AWS template loading and substitution: +- Use `{aws_template_structure}` placeholder in user prompts +- Templates are automatically loaded from S3 or local files (any format: txt, json, md, etc.) +- Enables structured outputs for compliance and automation + +### Document-Graph Integration + +The system seamlessly integrates with document-graph queries: +- Document-graph results flow through the `{query}` variable as text +- No special handling required - system is input-agnostic +- Supports complex knowledge graph traversal → RAG → LLM workflows + +### System vs User Prompts + +The GraphRAG Toolkit uses a two-prompt architecture following LlamaIndex ChatPromptTemplate: + +**System Prompt:** +- **Role**: Defines the AI's identity, expertise, and behavior +- **Content**: Instructions on how to act (e.g., "You are an AWS security expert") +- **Purpose**: Sets context, tone, and domain knowledge +- **Variables**: No dynamic variables - static instructions + +**User Prompt:** +- **Role**: Contains the actual task and dynamic content +- **Content**: Task instructions with variable placeholders +- **Purpose**: Processes input data and defines output format +- **Variables**: `{query}`, `{search_results}`, `{additionalContext}`, `{aws_template_structure}` + +**Example Structure:** +``` +System: "You are an AWS security expert specializing in compliance reporting." +User: "Generate evidence report for: {query} using context: {search_results}" +``` + +--- + +## Built-in Providers + +There are four built-in providers: + +### 1. StaticPromptProvider + +Use this when your system and user prompts are defined as constants in your codebase. + +```python +from graphrag_toolkit.lexical_graph.prompts.static_prompt_provider import StaticPromptProvider + +prompt_provider = StaticPromptProvider() +``` + +This provider uses the predefined constants `ANSWER_QUESTION_SYSTEM_PROMPT` and `ANSWER_QUESTION_USER_PROMPT`. AWS template placeholders are automatically removed if no template is available. + +--- + +### 2. FilePromptProvider + +Use this when your prompts are stored locally on disk. + +```python +from graphrag_toolkit.lexical_graph.prompts.file_prompt_provider import FilePromptProvider +from graphrag_toolkit.lexical_graph.prompts.prompt_provider_config import FilePromptProviderConfig + +prompt_provider = FilePromptProvider( + FilePromptProviderConfig(base_path="./prompts"), + system_prompt_file="system.txt", + user_prompt_file="user.txt", + aws_template_file="aws_template.json" # optional AWS template (any format) +) +``` + +The prompt files are read from a directory (`base_path`), and you can override the file names if needed. AWS templates are automatically loaded and substituted into `{aws_template_structure}` placeholders. + +--- + +### 3. S3PromptProvider + +Use this when your prompts are stored in an Amazon S3 bucket. 
+ +```python +from graphrag_toolkit.lexical_graph.prompts.s3_prompt_provider import S3PromptProvider +from graphrag_toolkit.lexical_graph.prompts.prompt_provider_config import S3PromptProviderConfig + +prompt_provider = S3PromptProvider( + S3PromptProviderConfig( + bucket="ccms-prompts", + prefix="prompts", + aws_region="us-east-1", # optional if set via env + aws_profile="my-profile", # optional if using default profile + system_prompt_file="my_system.txt", # optional override + user_prompt_file="my_user.txt", # optional override + aws_template_file="aws_template.json" # optional AWS template (any format) + ) +) +``` + +Prompts are loaded using `boto3` and AWS credentials. AWS templates are automatically loaded from S3 and substituted into `{aws_template_structure}` placeholders. Ensure your environment or `~/.aws/config` is configured for SSO, roles, or keys. + +--- + +### 4. BedrockPromptProvider + +Use this when your prompts are stored and versioned using Amazon Bedrock prompt ARNs. + +```python +from graphrag_toolkit.lexical_graph.prompts.bedrock_prompt_provider import BedrockPromptProvider +from graphrag_toolkit.lexical_graph.prompts.prompt_provider_config import BedrockPromptProviderConfig + +prompt_provider = BedrockPromptProvider( + config=BedrockPromptProviderConfig( + system_prompt_arn="arn:aws:bedrock:us-east-1:123456789012:prompt/my-system", + user_prompt_arn="arn:aws:bedrock:us-east-1:123456789012:prompt/my-user", + system_prompt_version="DRAFT", + user_prompt_version="DRAFT", + aws_template_s3_bucket="my-templates", # optional S3 bucket for templates + aws_template_s3_key="templates/aws.json" # optional S3 key for templates (any format) + ) +) +``` + +This provider resolves prompt ARNs dynamically using STS and can fall back to S3 for AWS template loading. Templates are substituted into `{aws_template_structure}` placeholders. + +## Suggested Future Enhancements + +Here are several points that could be added to improve the [`prompt_provider_config.py`](./src/graphrag_toolkit/lexical_graph/prompts/prompt_provider_config.py) + +### 1. Unified PromptProviderRegistry or Factory + +- Introduce a registry that maps provider types to config classes, e.g.: + + ```python + registry = { + "static": StaticPromptProviderConfig, + "file": FilePromptProviderConfig, + "s3": S3PromptProviderConfig, + "bedrock": BedrockPromptProviderConfig + } + ``` + +- Enable initialization from a config dict: `registry[type](**params).build()` + +### 2. Config Serialization + +- Add `.to_dict()` and `.from_dict()` methods to each config class for CLI/JSON compatibility. +- Useful for web UIs or YAML-driven orchestration. + +### 3. Validation & Type Enforcement + +- Use Pydantic or `__post_init__()` methods to validate inputs (e.g., ARN format, S3 bucket name). +- Example: validate AWS region format or prompt ARN prefix. + +### 4. Logging Enhancements + +- Add verbose logging on each provider (e.g., which prompt path or ARN was loaded). +- Include diagnostics for STS calls and client creation failures. + +### 5. Caching Layer + +- Cache resolved prompt text in memory or on disk (especially for S3 and Bedrock). +- Avoid unnecessary repeated fetches in batch queries. + +### 6. Runtime Provider Switching + +- Allow query-time override of prompt provider (e.g., via `query_engine.query(..., prompt_provider=...)`). +- Enables experimentation with different prompt strategies. + +### 7. Prompt Fallback Strategy + +- Support fallback to defaults or static provider if S3/Bedrock fails. 
+- Enables robust operation in partially degraded environments. + +### 8. Custom Prompt Variables + +- Support variable interpolation in prompt templates (e.g., using `{tenant_id}` or `{user_role}`). +- Useful for multi-tenant or role-specific prompting. + +### 9. Multi-Language Prompt Support + +- Load prompt variants based on locale/language code. +- Supports internationalization of RAG applications. + +### 10. Bedrock Caching with Prompt Versioning + +- Cache based on `(ARN, version)` tuple. +- Useful when managing multiple versions in experiments or A/B testing. diff --git a/docs-site/src/content/docs/lexical-graph/querying.mdx b/docs-site/src/content/docs/lexical-graph/querying.mdx new file mode 100644 index 00000000..68a70444 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/querying.mdx @@ -0,0 +1,133 @@ +--- +title: Querying +--- + +import { Tabs, TabItem, Aside } from '@astrojs/starlight/components'; + +The primary unit of context presented to the LLM is the *statement* — a standalone assertion or proposition extracted from a source chunk. Statements are grouped by topic and source, and that grouping is what the query engine presents to the LLM. + +The lexical-graph uses a [traversal-based search](/graphrag-toolkit/lexical-graph/traversal-based-search/) strategy that combines similarity search with graph traversal. A [semantic-guided search](/graphrag-toolkit/lexical-graph/semantic-guided-search/) approach also exists but is likely to be retired in a future release. + +Querying supports [metadata filtering](/graphrag-toolkit/lexical-graph/metadata-filtering/) and [multi-tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/). + +### Factory methods + + + + ```python title="query_traversal.py" {1,5} + from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine + + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + versioning=False, + ) + + response = query_engine.query("How does Neptune Analytics differ from Neptune DB?") + print(response) + ``` + + + ```python title="query_semantic.py" {1,5} "enable_versioning" + from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine + + query_engine = LexicalGraphQueryEngine.for_semantic_guided_search( + graph_store, + vector_store, + enable_versioning=False, + ) + + response = query_engine.query("How does Neptune Analytics differ from Neptune DB?") + print(response) + ``` + + + +Use `for_traversal_based_search()` for most workloads. Use `for_semantic_guided_search()` only if you specifically need the semantic-guided strategy. + +Both factory methods accept `graph_store`, `vector_store`, `tenant_id`, `post_processors`, `filter_config`, and `**kwargs`. The versioning parameter name differs between the two ([`lexical_graph_query_engine.py:67`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_query_engine.py#L67)): + +| Factory method | versioning parameter | +| --- | --- | +| `for_traversal_based_search` | `versioning` | +| `for_semantic_guided_search` | `enable_versioning` | + +You can also construct `LexicalGraphQueryEngine` directly, passing `system_prompt`, `user_prompt`, or a `prompt_provider` kwarg. See [Using Custom Prompt Providers](/graphrag-toolkit/lexical-graph/prompts/). + +### Context format + +The `context_format` kwarg controls how retrieved statements are serialised before being injected into the LLM prompt. 
Supported values ([`lexical_graph_query_engine.py:408`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_query_engine.py#L408)): + +| Value | Description | Default for | +| --- | --- | --- | +| `'json'` | JSON array of topic/statement objects | `__init__` direct construction | +| `'yaml'` | YAML representation of the same structure | — | +| `'xml'` | XML representation of the same structure | — | +| `'text'` | Plain text, one topic heading per group | `for_traversal_based_search` | +| `'bedrock_xml'` | Pre-formatted XML produced by a `BedrockContextFormat` post-processor | `for_semantic_guided_search` (hardcoded) | + +`for_semantic_guided_search` always uses `'bedrock_xml'` and ignores any `context_format` kwarg you pass. `for_traversal_based_search` defaults to `'text'` but accepts any of the values above. + +### Verbose mode + +The `verbose` kwarg (default `True`) controls answer length. When `True`, the LLM is instructed to answer fully; when `False`, concisely. This only affects the non-streaming code path ([`lexical_graph_query_engine.py:356`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_query_engine.py#L356)). + +```python +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + verbose=False +) +``` + +### Async querying + + + +It does not implement async querying — calling `await query_engine.aquery(...)` will raise a `NotImplementedError`. Use `query_engine.query(...)` instead ([`lexical_graph_query_engine.py:563`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_query_engine.py#L563)). + +### Managing indexed sources + +`LexicalGraphIndex` exposes three methods for inspecting and managing what has been indexed ([`lexical_graph_index.py:596`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/lexical_graph_index.py#L596)): + +#### `get_stats()` + +Returns a dict with node counts and two graph connectivity metrics: + +```python +stats = graph_index.get_stats() +# { +# 'source': 12, 'chunk': 180, 'topic': 950, +# 'statement': 4200, 'fact': 3100, 'entity': 820, +# 'localConnectivity': 1.23456, +# 'globalConnectivity': 0.98765, +# ... +# } +``` + +#### `get_sources(...)` + +Queries the graph for source document metadata. Accepts a `source_id` (str), `source_ids` (list), `filter` (`FilterConfig`, dict, or list of dicts), an optional `versioning_config`, and an optional `order_by` field name or list. + +```python +sources = graph_index.get_sources(filter={'url': 'https://example.com/page'}) +``` + +#### `delete_sources(...)` + +Same filter API as `get_sources`. Removes matching sources from both the graph store and the vector store and returns the list of deleted source IDs. 
+ +```python +deleted = graph_index.delete_sources(source_id='chunk::abc123') +``` + +--- + +See also: + +- [Traversal-Based Search](/graphrag-toolkit/lexical-graph/traversal-based-search/) +- [Configuring and Tuning Traversal-Based Search](/graphrag-toolkit/lexical-graph/traversal-based-search-configuration/) +- [Metadata Filtering](/graphrag-toolkit/lexical-graph/metadata-filtering/) +- [Multi-Tenancy](/graphrag-toolkit/lexical-graph/multi-tenancy/) diff --git a/docs-site/src/content/docs/lexical-graph/readers.mdx b/docs-site/src/content/docs/lexical-graph/readers.mdx new file mode 100644 index 00000000..f4e422d0 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/readers.mdx @@ -0,0 +1,487 @@ +--- +title: Reader Providers +--- + +## Overview +The GraphRAG Toolkit provides a unified, extensible system for reading documents from a wide variety of sources. Reader providers abstract the details of document ingestion, allowing you to work with files, databases, APIs, cloud storage, and more using a consistent interface. + +## Architecture + +### Core Abstractions +- **ReaderProvider**: The abstract base class for all document readers. Every concrete reader implements the `read(input_source)` method, returning a list of `Document` objects. +- **BaseReaderProvider**: Implements both the GraphRAG `ReaderProvider` and LlamaIndex `BaseReader` interfaces, providing compatibility and a standard pattern for new readers. +- **LlamaIndexReaderProviderBase**: A simple wrapper for LlamaIndex readers, making it easy to adapt existing LlamaIndex readers to the GraphRAG system. +- **ValidatedReaderProviderBase**: Extends `LlamaIndexReaderProviderBase` with input, output, and configuration validation. + +### Configuration Classes +Each reader provider is paired with a configuration class (e.g., `PDFReaderConfig`, `WebReaderConfig`). These classes define the parameters required for each data source and use Python dataclasses for validation. + +## How to Use + +1. **Choose a provider and config** for your data source +2. **Instantiate the config** with the required parameters +3. **Create the provider** with the config +4. **Call `.read(input_source)`** to extract documents + +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import PDFReaderProvider, PDFReaderConfig + +config = PDFReaderConfig( + return_full_document=False, + metadata_fn=lambda path: {'source': 'pdf', 'file_path': path} +) +reader = PDFReaderProvider(config) +documents = reader.read("/path/to/file.pdf") +``` + +## Using Metadata with Readers + +Many reader providers support attaching custom metadata to each document via the `metadata_fn` parameter in the configuration class. The function should accept an input and return a dictionary of metadata. 
+ +```python +def custom_metadata(path): + return { + "source": path, + "document_type": "technical_doc", + "project": "GraphRAG" + } + +config = PDFReaderConfig( + return_full_document=False, + metadata_fn=custom_metadata +) +``` + +## Built-in Providers + +### Document Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `PDFReaderProvider` | `PDFReaderConfig` | PDF documents | `pymupdf`, `llama-index-readers-file` | +| `DocxReaderProvider` | `DocxReaderConfig` | Word documents | `python-docx` | +| `PPTXReaderProvider` | `PPTXReaderConfig` | PowerPoint files | `python-pptx` | +| `MarkdownReaderProvider` | `MarkdownReaderConfig` | Markdown files | Built-in | +| `CSVReaderProvider` | `CSVReaderConfig` | CSV files | Built-in | +| `JSONReaderProvider` | `JSONReaderConfig` | JSON/JSONL files | Built-in | +| `StreamingJSONLReaderProvider` | `StreamingJSONLReaderConfig` | Memory-efficient JSONL processing | Built-in | +| `StructuredDataReaderProvider` | `StructuredDataReaderConfig` | CSV/Excel files with streaming | `pandas`, `openpyxl`, `llama-index-readers-structured-data` | + +### Web and Knowledge Base Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `WebReaderProvider` | `WebReaderConfig` | Web pages | `requests`, `beautifulsoup4` | +| `WikipediaReaderProvider` | `WikipediaReaderConfig` | Wikipedia articles | `wikipedia` | +| `YouTubeReaderProvider` | `YouTubeReaderConfig` | YouTube transcripts | `youtube-transcript-api` | + +### Cloud Storage Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `S3DirectoryReaderProvider` | `S3DirectoryReaderConfig` | AWS S3 buckets | `boto3` | +| `DirectoryReaderProvider` | `DirectoryReaderConfig` | Local directories | Built-in | + +### Database Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `DatabaseReaderProvider` | `DatabaseReaderConfig` | SQL databases | Database-specific drivers | + + +### Code and Repository Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `GitHubReaderProvider` | `GitHubReaderConfig` | GitHub repositories | `PyGithub` | + + +### Specialized Readers +| Provider | Config | Description | Dependencies | +|----------|--------|-------------|--------------| +| `DocumentGraphReaderProvider` | `DocumentGraphReaderConfig` | Document graphs | Built-in | + + +## S3 Support + +The GraphRAG Toolkit provides two approaches for S3 integration: + +### 1. S3DirectoryReaderProvider (Recommended) +Modern S3 reader using LlamaIndex's S3Reader for direct S3 access: + +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import S3DirectoryReaderProvider, S3DirectoryReaderConfig + +# For a single file +config = S3DirectoryReaderConfig( + bucket="my-bucket", + key="documents/file.pdf", # Use 'key' for single file + metadata_fn=lambda path: {'source': 's3'} +) + +# For a directory/prefix +config = S3DirectoryReaderConfig( + bucket="my-bucket", + prefix="documents/", # Use 'prefix' for directory + metadata_fn=lambda path: {'source': 's3'} +) + +# Note: Use either 'key' OR 'prefix', not both +reader = S3DirectoryReaderProvider(config) +docs = reader.read() +``` + +### 2. 
Legacy S3BasedDocs +Legacy system for S3 document storage and retrieval (still supported): + +```python +from graphrag_toolkit.lexical_graph.indexing.load import S3BasedDocs + +s3_docs = S3BasedDocs( + region="us-east-1", + bucket_name="my-bucket", + key_prefix="documents/", + collection_id="my-collection" +) + +# Iterate through stored documents +for doc in s3_docs: + # Process document + pass +``` + +### S3 Authentication +S3 access uses `GraphRAGConfig.session` for AWS credentials. Configure via: +- AWS credentials file (`~/.aws/credentials`) +- Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`) +- IAM roles (when running on AWS) +- AWS SSO profiles + +### S3 Streaming for Large Files +The `StructuredDataReaderProvider` supports streaming large S3 files to avoid downloading: + +```python +config = StructuredDataReaderConfig( + stream_s3=True, # Enable streaming + stream_threshold_mb=100, # Stream files > 100MB + pandas_config={"sep": ","} +) +``` + +## Streaming JSONL Processing + +The `StreamingJSONLReaderProvider` is designed for memory-efficient processing of large JSONL files. Unlike the standard `JSONReaderProvider` which loads all lines into memory, this provider processes files line-by-line with constant memory usage. + +### Key Features + +- **Memory Efficient**: Processes files line-by-line without loading entire file into memory +- **Batch Processing**: Yields documents in configurable batches for efficient downstream processing +- **S3 Support**: Works seamlessly with both local files and S3 URIs +- **Flexible Text Extraction**: Extract text from a specific field or use entire JSON object +- **Error Handling**: Configurable strict mode for validation or graceful error skipping +- **Progress Logging**: Built-in progress tracking for long-running operations + +### Configuration Options + +```python +StreamingJSONLReaderConfig( + batch_size=100, # Number of documents per batch (default: 100) + text_field="text", # JSON field to extract as text (default: "text") + # Set to None to use entire JSON as text + strict_mode=False, # If True, raise errors on invalid JSON/missing fields + # If False, skip invalid lines and continue (default) + log_interval=10000, # Log progress every N lines (default: 10000) + metadata_fn=None # Optional function to add custom metadata +) +``` + +### Usage Examples + +#### Basic Usage +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import ( + StreamingJSONLReaderProvider, + StreamingJSONLReaderConfig +) + +config = StreamingJSONLReaderConfig() +reader = StreamingJSONLReaderProvider(config) +docs = reader.read('data.jsonl') +``` + +#### Custom Text Field +```python +# Extract text from a different field +config = StreamingJSONLReaderConfig( + text_field="content", # Use "content" field instead of "text" +) +reader = StreamingJSONLReaderProvider(config) +docs = reader.read('data.jsonl') +``` + +#### Use Entire JSON as Text +```python +# Use the entire JSON object as document text +config = StreamingJSONLReaderConfig( + text_field=None # None means use full JSON +) +reader = StreamingJSONLReaderProvider(config) +docs = reader.read('data.jsonl') +``` + +#### Lazy Loading for Large Files +```python +# Process large files in batches without loading all into memory +config = StreamingJSONLReaderConfig( + batch_size=50, + log_interval=5000 +) +reader = StreamingJSONLReaderProvider(config) + +for batch in reader.lazy_load_data('large-file.jsonl'): + # Process each batch of up to 50 documents + for doc in batch: + # Process 
individual document + print(f"Line {doc.metadata['line_number']}: {doc.text[:100]}...") +``` + +#### S3 Files +```python +# Works seamlessly with S3 URIs +config = StreamingJSONLReaderConfig( + batch_size=100, + metadata_fn=lambda path: {'bucket': path.split('/')[2]} +) +reader = StreamingJSONLReaderProvider(config) +docs = reader.read('s3://my-bucket/data/large-file.jsonl') +``` + +#### Strict Mode for Validation +```python +# Raise errors on invalid JSON or missing fields +config = StreamingJSONLReaderConfig( + text_field="required_field", + strict_mode=True # Will raise exception on first error +) +reader = StreamingJSONLReaderProvider(config) + +try: + docs = reader.read('data.jsonl') +except (json.JSONDecodeError, ValueError) as e: + print(f"Validation failed: {e}") +``` + +### Metadata + +Each document includes the following metadata: +- `file_path`: Original source path (local or S3) +- `source`: Either "local_file" or "s3" +- `line_number`: 1-based line number in the file +- `document_type`: Always "jsonl" +- Any additional fields from `metadata_fn` + +## Configuration Examples + +### PDF Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import PDFReaderProvider, PDFReaderConfig + +config = PDFReaderConfig( + return_full_document=False, + metadata_fn=lambda path: {'source': 'pdf', 'file_path': path} +) +reader = PDFReaderProvider(config) +docs = reader.read('document.pdf') +``` + +### Web Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import WebReaderProvider, WebReaderConfig + +config = WebReaderConfig( + html_to_text=True, + metadata_fn=lambda url: {'source': 'web', 'url': url} +) +reader = WebReaderProvider(config) +docs = reader.read('https://example.com') +``` + +### YouTube Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import YouTubeReaderProvider, YouTubeReaderConfig + +config = YouTubeReaderConfig( + language="en", + metadata_fn=lambda url: {'source': 'youtube', 'url': url} +) +reader = YouTubeReaderProvider(config) +docs = reader.read('https://www.youtube.com/watch?v=VIDEO_ID') +``` + +#### Proxy Support + +For environments requiring HTTP/HTTPS proxies (corporate networks, containerized deployments): + +```python +# Option 1: Configure via YouTubeReaderConfig +config = YouTubeReaderConfig( + language="en", + proxy_url="http://proxy.example.com:8080", # HTTP/HTTPS proxy + metadata_fn=lambda url: {'source': 'youtube', 'url': url} +) +reader = YouTubeReaderProvider(config) +docs = reader.read('https://www.youtube.com/watch?v=VIDEO_ID') + +# Option 2: Configure via environment variable +# export YOUTUBE_PROXY_URL=http://proxy.example.com:8080 +config = YouTubeReaderConfig(language="en") +reader = YouTubeReaderProvider(config) # Automatically uses YOUTUBE_PROXY_URL +docs = reader.read('https://www.youtube.com/watch?v=VIDEO_ID') +``` + +The proxy URL should be in the format `http://proxy.example.com:port` or `https://proxy.example.com:port`. The same proxy is used for both HTTP and HTTPS requests. 
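+
+Whichever reader you use, the `Document` list it returns is what you hand to the indexing pipeline. The following is a rough sketch only: the store connection strings are placeholders, and the indexing workflow shown here is summarised from the [Indexing](/graphrag-toolkit/lexical-graph/indexing/) documentation, which remains the authoritative reference.
+
+```python
+from graphrag_toolkit.lexical_graph import LexicalGraphIndex
+from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory, VectorStoreFactory
+from graphrag_toolkit.lexical_graph.indexing.load.readers import PDFReaderProvider, PDFReaderConfig
+
+# Placeholder connection strings - substitute your own graph and vector store endpoints
+graph_store = GraphStoreFactory.for_graph_store(
+    'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com'
+)
+vector_store = VectorStoreFactory.for_vector_store(
+    'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com'
+)
+
+# Read documents with any reader provider, then extract and build the lexical graph
+docs = PDFReaderProvider(PDFReaderConfig()).read('document.pdf')
+
+graph_index = LexicalGraphIndex(graph_store, vector_store)
+graph_index.extract_and_build(docs, show_progress=True)
+```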
+ +### Structured Data Reader (CSV/Excel) +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import StructuredDataReaderProvider, StructuredDataReaderConfig + +config = StructuredDataReaderConfig( + col_index=0, # Column to use as index + col_joiner=', ', # How to join columns + pandas_config={"sep": ","}, # Pandas options + stream_s3=True, # Enable S3 streaming + stream_threshold_mb=50, # Stream files > 50MB + metadata_fn=lambda path: {'source': 'structured', 'file': path} +) +reader = StructuredDataReaderProvider(config) + +# Works with local and S3 files +docs = reader.read(['data.csv', 's3://bucket/large-file.xlsx']) +``` + +### S3 Directory Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import S3DirectoryReaderProvider, S3DirectoryReaderConfig + +# Reading from a directory/prefix +config = S3DirectoryReaderConfig( + bucket="my-bucket", + prefix="documents/", # For directory access + metadata_fn=lambda path: {'source': 's3', 'path': path} +) +reader = S3DirectoryReaderProvider(config) +docs = reader.read() # No parameter needed + +# Reading a single file +config = S3DirectoryReaderConfig( + bucket="my-bucket", + key="documents/specific-file.pdf", # For single file + metadata_fn=lambda path: {'source': 's3', 'path': path} +) +reader = S3DirectoryReaderProvider(config) +docs = reader.read() # No parameter needed +``` + +### Streaming JSONL Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import StreamingJSONLReaderProvider, StreamingJSONLReaderConfig + +# Memory-efficient processing of large JSONL files +config = StreamingJSONLReaderConfig( + batch_size=100, # Process in batches + text_field="text", # Field to extract as document text + strict_mode=False, # Skip invalid lines instead of raising errors + log_interval=10000, # Log progress every N lines + metadata_fn=lambda path: {'source': 'jsonl', 'file': path} +) +reader = StreamingJSONLReaderProvider(config) + +# Works with local and S3 files +docs = reader.read('data.jsonl') +docs = reader.read('s3://bucket/large-file.jsonl') + +# Or use lazy loading for streaming +for batch in reader.lazy_load_data('large-file.jsonl'): + # Process each batch of documents + for doc in batch: + print(doc.text) +``` + +### Database Reader +```python +from graphrag_toolkit.lexical_graph.indexing.load.readers import DatabaseReaderProvider, DatabaseReaderConfig + +config = DatabaseReaderConfig( + connection_string="postgresql://user:pass@localhost/db", + query="SELECT id, content FROM documents", + metadata_fn=lambda row: {'source': 'database', 'id': row.get('id')} +) +reader = DatabaseReaderProvider(config) +docs = reader.read(config.query) +``` + +## Installation Requirements + +Different readers require different dependencies. Install as needed: + +```bash +# PDF processing +pip install pymupdf llama-index-readers-file + +# Web scraping +pip install requests beautifulsoup4 llama-index-readers-web + +# YouTube transcripts +pip install youtube-transcript-api + +# AWS services +pip install boto3 + +# Structured data processing +pip install pandas openpyxl llama-index-readers-structured-data + +# Office documents +pip install python-docx python-pptx + +# GitHub integration +pip install PyGithub + +# Notion integration +pip install notion-client + +# Wikipedia +pip install wikipedia +``` + +## Extending: Writing a Custom Reader + +To add a new data source: + +1. 
**Create a config class** as a dataclass:
+```python
+from dataclasses import dataclass
+from typing import Optional, Callable, Dict, Any
+from .reader_provider_config_base import ReaderProviderConfig
+
+@dataclass
+class MyReaderConfig(ReaderProviderConfig):
+    api_key: str = ""
+    metadata_fn: Optional[Callable[[str], Dict[str, Any]]] = None
+```
+
+2. **Subclass a base provider**:
+```python
+from .base_reader_provider import BaseReaderProvider
+
+class MyReaderProvider(BaseReaderProvider):
+    def __init__(self, config: MyReaderConfig):
+        self.config = config
+
+    def read(self, input_source):
+        # Implement your reading logic
+        documents = []
+        # ... process input_source ...
+        return documents
+```
+
+3. **Register in `__init__.py`** for easy importing.
+
+## References
+- [Base Classes](../../lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/load/readers/)
+- [Configuration Classes](../../lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/load/readers/reader_provider_config.py)
+- [Provider Implementations](../../lexical-graph/src/graphrag_toolkit/lexical_graph/indexing/load/readers/providers/)
diff --git a/docs-site/src/content/docs/lexical-graph/security.mdx b/docs-site/src/content/docs/lexical-graph/security.mdx
new file mode 100644
index 00000000..a204a8d4
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/security.mdx
@@ -0,0 +1,211 @@
+---
+title: Security
+---
+
+### Topics
+
+ - [Overview](#overview)
+ - [Managing access to Amazon Neptune](#managing-access-to-amazon-neptune)
+ - [Managing access to Amazon OpenSearch Serverless](#managing-access-to-amazon-opensearch-serverless)
+   - [OpenSearch API operations IAM policy](#opensearch-api-operations-iam-policy)
+   - [Data access policy](#data-access-policy)
+   - [Network access policy](#network-access-policy)
+   - [Encryption policy](#encryption-policy)
+ - [Managing access to Amazon Bedrock](#managing-access-to-amazon-bedrock)
+
+### Overview
+
+When building an application with the lexical-graph library, you are responsible for securing access to your source data, and to the graph store, vector store, and foundation model APIs that you use. The following sections provide guidance on using AWS Identity and Access Management (IAM) policies to control access to Amazon Neptune, Amazon OpenSearch Serverless, and Amazon Bedrock.
+
+### Managing access to Amazon Neptune
+
+Index operations require read and write access to your Amazon Neptune database. Query operations require only read access to the database.
+
+To allow your application to read data from an Amazon Neptune database, attach the following example IAM policy to the AWS identity under which your application runs. Replace `<account-id>` with your AWS account ID, `<region>` with the name of the AWS Region in which your Amazon Neptune database cluster is located, and `<cluster-resource-id>` with the [cluster resource id](https://docs.aws.amazon.com/neptune/latest/userguide/iam-data-resources.html) of your database cluster.
+
+```
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "NeptuneDBReadAccessStatement",
+      "Effect": "Allow",
+      "Action": [
+        "neptune-db:ReadDataViaQuery"
+      ],
+      "Resource": "arn:aws:neptune-db:<region>:<account-id>:<cluster-resource-id>/*",
+      "Condition": {
+        "StringEquals": {
+          "neptune-db:QueryLanguage": "OpenCypher"
+        }
+      }
+    }
+  ]
+}
+```
+
+To allow your application to write data to an Amazon Neptune database, attach the following example IAM policy to the AWS identity under which your application runs.
Replace `<account-id>` with your AWS account ID, `<region>` with the name of the AWS Region in which your Amazon Neptune database cluster is located, and `<cluster-resource-id>` with the [cluster resource id](https://docs.aws.amazon.com/neptune/latest/userguide/iam-data-resources.html) of your database cluster.
+
+```
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "NeptuneDBWriteAccessStatement",
+      "Effect": "Allow",
+      "Action": [
+        "neptune-db:WriteDataViaQuery",
+        "neptune-db:DeleteDataViaQuery"
+      ],
+      "Resource": "arn:aws:neptune-db:<region>:<account-id>:<cluster-resource-id>/*",
+      "Condition": {
+        "StringEquals": {
+          "neptune-db:QueryLanguage": "OpenCypher"
+        }
+      }
+    }
+  ]
+}
+```
+
+See [Managing access to Amazon Neptune databases using IAM policies](https://docs.aws.amazon.com/neptune/latest/userguide/security-iam-access-manage.html) for more details on protecting access to Amazon Neptune using IAM policies.
+
+### Managing access to Amazon OpenSearch Serverless
+
+To allow your application to read from and write data to an Amazon OpenSearch Serverless collection, you must associate data access, network and encryption policies with the collection. In addition, the principal under which your application runs must also be granted the `aoss:APIAccessAll` IAM permission, which you can do using an IAM policy.
+
+See [Overview of security in Amazon OpenSearch Serverless](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-security.html) for more details on protecting access to Amazon OpenSearch Serverless collections.
+
+#### OpenSearch API operations IAM policy
+
+To allow data plane access to the OpenSearch API operations, attach the following example IAM policy to the AWS identity under which your application runs. Replace `<account-id>` with your AWS account ID, `<region>` with the name of the AWS Region in which your Amazon OpenSearch Serverless collection is located, and `<collection-id>` with the id (not the name) of your collection.
+
+```
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "OpenSearchServerlessAPIAccessAllStatement",
+      "Effect": "Allow",
+      "Action": [
+        "aoss:APIAccessAll"
+      ],
+      "Resource": [
+        "arn:aws:aoss:<region>:<account-id>:collection/<collection-id>"
+      ]
+    }
+  ]
+}
+```
+
+#### Data access policy
+
+A [data access policy](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-data-access.html) controls access to the OpenSearch operations that OpenSearch Serverless supports.
+
+You can use an existing data access policy or you can create a new one using the example policy below. Replace `<collection-name>` with the name of your OpenSearch Serverless collection, and `<principal-arn>` with the ARN of the IAM role or user attached to your application.
+
+```
+[
+  {
+    "Rules": [
+      {
+        "Resource": [
+          "collection/<collection-name>"
+        ],
+        "Permission": [
+          "aoss:DescribeCollectionItems",
+          "aoss:CreateCollectionItems",
+          "aoss:UpdateCollectionItems"
+        ],
+        "ResourceType": "collection"
+      },
+      {
+        "Resource": [
+          "index/<collection-name>/*"
+        ],
+        "Permission": [
+          "aoss:UpdateIndex",
+          "aoss:DescribeIndex",
+          "aoss:ReadDocument",
+          "aoss:WriteDocument",
+          "aoss:CreateIndex"
+        ],
+        "ResourceType": "index"
+      }
+    ],
+    "Principal": [
+      "<principal-arn>"
+    ]
+  }
+]
+```
+
+#### Network access policy
+
+A [network access policy](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-network.html) defines network access to an OpenSearch Serverless collection's endpoint. The network settings for an Amazon OpenSearch Serverless collection determine whether the collection is accessible over the internet from public networks, or whether it must be accessed privately via a VPC endpoint.
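+
+Network, encryption, and data access policies can be created in the console, with the AWS CLI, or programmatically. As a minimal boto3 sketch (the policy and collection names are illustrative), a network policy like the example shown below can be registered as follows:
+
+```python
+import json
+import boto3
+
+aoss = boto3.client('opensearchserverless', region_name='us-east-1')
+
+# Illustrative policy document - mirrors the example network access policy below
+network_policy = [
+    {
+        'Rules': [
+            {'Resource': ['collection/my-collection'], 'ResourceType': 'collection'}
+        ],
+        'AllowFromPublic': True,
+    }
+]
+
+aoss.create_security_policy(
+    name='my-collection-network-policy',
+    type='network',
+    policy=json.dumps(network_policy),
+)
+```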
+
+You can use an existing network access policy or you can create a new one using the example policy below. This example policy provides public access to a collection's OpenSearch endpoint. Replace `<collection-name>` with the name of your OpenSearch Serverless collection:
+
+```
+[
+  {
+    "Rules": [
+      {
+        "Resource": [
+          "collection/<collection-name>"
+        ],
+        "ResourceType": "collection"
+      }
+    ],
+    "AllowFromPublic": true
+  }
+]
+```
+
+#### Encryption policy
+
+An [encryption policy](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless-encryption.html) assigns an encryption key to the collection. Collections are encrypted using either an AWS owned key or a customer managed key.
+
+You can use an existing encryption policy or you can create a new one using the example policy below. This example policy uses an AWS owned key to encrypt a collection. Replace `<collection-name>` with the name of your OpenSearch Serverless collection:
+
+```
+[
+  {
+    "Rules":[
+      {
+        "ResourceType":"collection",
+        "Resource":[
+          "collection/<collection-name>"
+        ]
+      }
+    ],
+    "AWSOwnedKey": true
+  }
+]
+```
+
+### Managing access to Amazon Bedrock
+
+To allow your application to invoke the Amazon Bedrock foundation models used by the graphrag-toolkit, attach the following example IAM policy to the AWS identity under which your application runs. Replace `<account-id>` with your AWS account ID, `<region>` with the name of the AWS Region in which Amazon Bedrock is located, and `<region-prefix>` with the Region prefix that represents the geography covered by an inference profile (e.g. `us` for US-based AWS Regions such as `us-east-1` and `us-west-2`).
+
+This example IAM policy assumes that you are using the toolkit's default models: `us.anthropic.claude-3-7-sonnet-20250219-v1:0` and `cohere.embed-english-v3`. Before running your application, you must [enable access](https://docs.aws.amazon.com/bedrock/latest/userguide/model-access.html) to these models. See [Supported Regions and models for inference profiles](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html) for details on predefined inference profiles that you can use and the Regions and models that support application inference profiles.
+
+```
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Sid": "BedrockInvokeModelStatement",
+      "Effect": "Allow",
+      "Action": [
+        "bedrock:InvokeModel"
+      ],
+      "Resource": [
+        "arn:aws:bedrock:*::foundation-model/anthropic.claude-3-7-sonnet-20250219-v1:0",
+        "arn:aws:bedrock:<region>:<account-id>:inference-profile/<region-prefix>.anthropic.claude-3-7-sonnet-20250219-v1:0",
+        "arn:aws:bedrock:<region>::foundation-model/cohere.embed-english-v3"
+      ]
+    }
+  ]
+}
+```
diff --git a/docs-site/src/content/docs/lexical-graph/semantic-guided-search.mdx b/docs-site/src/content/docs/lexical-graph/semantic-guided-search.mdx
new file mode 100644
index 00000000..9c200e0a
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/semantic-guided-search.mdx
@@ -0,0 +1,271 @@
+---
+title: Semantic-Guided Search
+---
+
+The recommended method for query and retrieval is to use the [traversal-based search](/graphrag-toolkit/lexical-graph/traversal-based-search/) operation. The lexical-graph does, however, also currently support semantic-guided search, but this approach has several drawbacks:
+
+ - High storage costs due to requiring an embedding for each statement
+ - Poor performance with large datasets, with queries often taking minutes to complete
+ - Expected to be removed in future releases
+
+This page contains the semantic-guided search documentation.
+ +### Example + +The following example uses semantic-guided search with all the default settings to query the graph: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store( + 'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com' + ) as graph_store, + VectorStoreFactory.for_vector_store( + 'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com' + ) as vector_store +): + + query_engine = LexicalGraphQueryEngine.for_semantic_guided_search( + graph_store, + vector_store, + streaming=True + ) + + response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?") + +print(response.print_response_stream()) +``` + +By default, semantic-guided search uses a composite search strategy using three subretrievers: + + - `StatementCosineSimilaritySearch` – Gets the top k statements using cosine similarity of statement embeddings to the query embedding. + - `KeywordRankingSearch` – Gets the top k statements based on the number of matches to a specified number of keywords and synonyms extracted from the query. Statements with more keyword matches rank higher in the results. + - `SemanticBeamGraphSearch` – A statement-based search that finds a statement's neighbouring statements based on shared entities, and retains the most promising based on the cosine similarity of the candidate statements' embeddings to the query embedding. The search is seeded with statements from other retrievers (e.g. `StatementCosineSimilaritySearch` and/or `KeywordRankingSearch`), or from an initial vector similarity search against the statement index. + +#### Semantic-guided search results + +Semantic-guided search returns one or more search results, each of which comprises a source, and a set of statements: + +``` + + + https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-vs-neptune-database.html + +Neptune Database is a serverless graph database +Neptune Analytics is an analytics database engine +Neptune Analytics is a solution for quickly analyzing existing graph databases +Neptune Database provides a solution for graph database workloads that need Multi-AZ high availability +Neptune Analytics is a solution for quickly analyzing graph datasets stored in a data lake (details: Graph datasets LOCATION data lake) +Neptune Database provides a solution for graph database workloads that need to scale to 100,000 queries per second +Neptune Database is designed for optimal scalability +Neptune Database provides a solution for graph database workloads that need multi-Region deployments +Neptune Analytics removes the overhead of managing complex data-analytics pipelines (details: Overhead CONTEXT managing complex data-analytics pipelines) +... + + +... 


    https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html

Neptune Analytics allows performing business intelligence queries using openCypher language
The text distinguishes between Neptune Analytics and Neptune Database
Neptune Analytics allows performing custom analytical queries using openCypher language
Neptune Analytics allows performing in-database analytics on large graphs
Neptune Analytics allows focusing on queries and workflows to solve problems
Neptune Analytics can load data extremely fast into memory
Neptune Analytics allows running graph analytics queries using pre-built or custom graph queries
Neptune Analytics manages graphs instead of infrastructure
Neptune Analytics allows loading graph data from Amazon S3 or a Neptune Database endpoint
...

```

#### Configuring the SemanticGuidedRetriever

You can tune semantic-guided search behaviour by configuring the individual subretrievers:

| Retriever | Parameter | Description | Default Value |
| ------------- | ------------- | ------------- | ------------- |
| `StatementCosineSimilaritySearch` | `top_k` | Number of statements to include in the results | `100` |
| `KeywordRankingSearch` | `top_k` | Number of statements to include in the results | `100` |
| | `max_keywords` | The maximum number of keywords to extract from the query | `10` |
| `SemanticBeamGraphSearch` | `max_depth` | The maximum depth to follow promising candidates from the starting statements | `3` |
| | `beam_width` | The number of most promising candidates to return for each statement that is expanded | `10` |
| `RerankingBeamGraphSearch` | `max_depth` | The maximum depth to follow promising candidates from the starting statements | `3` |
| | `beam_width` | The number of most promising candidates to return for each statement that is expanded | `10` |
| | `reranker` | Reranker instance that will be used to rerank statements (see below) | `None` |
| | `initial_retrievers` | List of retrievers used to seed the starting statements (see below) | `None` |

#### Semantic-guided search with a reranking beam search

Instead of using a `SemanticBeamGraphSearch` with the `SemanticGuidedRetriever`, you can use a `RerankingBeamGraphSearch`. Rather than using cosine similarity to determine which candidate statements to pursue, the `RerankingBeamGraphSearch` uses a reranker.

You must initialize a `RerankingBeamGraphSearch` instance with a reranker. The toolkit includes two different rerankers: `BGEReranker` and `SentenceReranker`. If you're running on a CPU device, we recommend using the `SentenceReranker`. If you're running on a GPU device, you can choose either the `BGEReranker` or `SentenceReranker`.
+ +The example below uses a `SentenceReranker` with a `RerankingBeamGraphSearch` to rerank statements while conducting the beam search: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.retrieval.retrievers import RerankingBeamGraphSearch, StatementCosineSimilaritySearch, KeywordRankingSearch +from graphrag_toolkit.lexical_graph.retrieval.post_processors import SentenceReranker + +with ( + GraphStoreFactory.for_graph_store( + 'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com' + ) as graph_store, + VectorStoreFactory.for_vector_store( + 'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com' + ) as vector_store +): + + cosine_retriever = StatementCosineSimilaritySearch( + vector_store=vector_store, + graph_store=graph_store, + top_k=50 + ) + + keyword_retriever = KeywordRankingSearch( + vector_store=vector_store, + graph_store=graph_store, + max_keywords=10 + ) + + reranker = SentenceReranker( + batch_size=128 + ) + + beam_retriever = RerankingBeamGraphSearch( + vector_store=vector_store, + graph_store=graph_store, + reranker=reranker, + initial_retrievers=[cosine_retriever, keyword_retriever], + max_depth=8, + beam_width=100 + ) + + query_engine = LexicalGraphQueryEngine.for_semantic_guided_search( + graph_store, + vector_store, + retrievers=[ + cosine_retriever, + keyword_retriever, + beam_retriever + ] + ) + + response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?") + +print(response.response) +``` + +The example below uses a `BGEReranker` with a `RerankingBeamGraphSearch` to rerank statements while conducting the beam search. + +There will be a delay the first time this runs while the reranker downloads tensors. 
```python
from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine
from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory
from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory
from graphrag_toolkit.lexical_graph.retrieval.retrievers import RerankingBeamGraphSearch, StatementCosineSimilaritySearch, KeywordRankingSearch
from graphrag_toolkit.lexical_graph.retrieval.post_processors import BGEReranker

with (
    GraphStoreFactory.for_graph_store(
        'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com'
    ) as graph_store,
    VectorStoreFactory.for_vector_store(
        'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com'
    ) as vector_store
):

    cosine_retriever = StatementCosineSimilaritySearch(
        vector_store=vector_store,
        graph_store=graph_store,
        top_k=50
    )

    keyword_retriever = KeywordRankingSearch(
        vector_store=vector_store,
        graph_store=graph_store,
        max_keywords=10
    )

    reranker = BGEReranker(
        gpu_id=0, # Remove if running on a CPU device
        batch_size=128
    )

    beam_retriever = RerankingBeamGraphSearch(
        vector_store=vector_store,
        graph_store=graph_store,
        reranker=reranker,
        initial_retrievers=[cosine_retriever, keyword_retriever],
        max_depth=8,
        beam_width=100
    )

    query_engine = LexicalGraphQueryEngine.for_semantic_guided_search(
        graph_store,
        vector_store,
        retrievers=[
            cosine_retriever,
            keyword_retriever,
            beam_retriever
        ]
    )

    response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?")

print(response.response)
```

### Postprocessors

There are a number of postprocessors you can use to further improve and format results:

| Postprocessor | Description |
| ------------- | ------------- |
| `BGEReranker` | Reranks (and limits) results using the `BAAI/bge-reranker-v2-minicpm-layerwise` model before returning them to the query engine. Use only if you have a GPU device. |
| `SentenceReranker` | Reranks (and limits) results using the `mixedbread-ai/mxbai-rerank-xsmall-v1` model before returning them to the query engine. |
| `StatementDiversityPostProcessor` | Removes similar statements from the results using TF-IDF similarity. Before running `StatementDiversityPostProcessor` for the first time, load the following package: `python -m spacy download en_core_web_sm` |
| `StatementEnhancementPostProcessor` | Enhances statements by using chunk context and an LLM to improve content while preserving original metadata. (Requires an LLM call per statement.) |

The example below uses a `StatementDiversityPostProcessor`, `SentenceReranker` and `StatementEnhancementPostProcessor`. If you're running on a GPU device, you can replace the `SentenceReranker` with a `BGEReranker`.
```python
from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine
from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory
from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory
from graphrag_toolkit.lexical_graph.retrieval.post_processors import SentenceReranker, StatementDiversityPostProcessor, StatementEnhancementPostProcessor
import os

with (
    GraphStoreFactory.for_graph_store(
        'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com'
    ) as graph_store,
    VectorStoreFactory.for_vector_store(
        'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com'
    ) as vector_store
):

    query_engine = LexicalGraphQueryEngine.for_semantic_guided_search(
        graph_store,
        vector_store,
        post_processors=[
            SentenceReranker(),
            StatementDiversityPostProcessor(),
            StatementEnhancementPostProcessor()
        ]
    )

    response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?")

print(response.response)
```
diff --git a/docs-site/src/content/docs/lexical-graph/storage-model.mdx b/docs-site/src/content/docs/lexical-graph/storage-model.mdx
new file mode 100644
index 00000000..25bc296b
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/storage-model.mdx
@@ -0,0 +1,69 @@
---
title: Storage Model
---

### Topics

- [Overview](#overview)
- [Graph store](#graph-store)
  - [Logging graph queries](#logging-graph-queries)
- [Vector store](#vector-store)

### Overview

The lexical-graph uses two separate stores: a `GraphStore` and a `VectorStore`. A `VectorStore` acts as a container for a collection of `VectorIndex`. When constructing or querying a graph, you must provide instances of both a graph store and vector store.

The toolkit provides graph store implementations for both [Amazon Neptune Analytics](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/what-is-neptune-analytics.html) and [Amazon Neptune Database](https://docs.aws.amazon.com/neptune/latest/userguide/intro.html) (engine version 1.4.1.0 or later), and now [FalkorDB](https://docs.falkordb.com/), along with vector store implementations for Neptune Analytics, [Amazon OpenSearch Serverless](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/serverless.html) and Postgres with the pgvector extension. The lexical-graph provides several convenient factory methods for creating instances of these stores.

> This early release of the toolkit provides support for Amazon Neptune and Amazon OpenSearch Serverless, but we welcome alternative store implementations. The store APIs and the ways in which the stores are used have been designed to anticipate alternative implementations. However, the proof is in the development: if you experience issues developing an alternative store, [let us know](https://github.com/awslabs/graphrag-toolkit/issues).

Graph stores and vector stores provide connectivity to an *existing* storage instance, which you will need to have provisioned beforehand.

### Graph store

Graph stores must support the [openCypher](https://opencypher.org/) property graph query language. Graph construction queries typically use an `UNWIND ... MERGE` idiom to create or update the graph for a [batch of inputs](https://docs.aws.amazon.com/neptune-analytics/latest/userguide/best-practices-content.html#best-practices-content-14). The Neptune graph store implementations override the `GraphStore.node_id()` method to ensure that node ids in the code (e.g.
`chunkId`) are mapped to Neptune's `~id` reserved property. Alternative graph store implementations can leave the base implementation of `node_id()` as-is. This will result in node ids being mapped to a property of the same name (i.e. a reference to `chunkId` in the code will be mapped to a `chunkId` property of a node).

You use the `GraphStoreFactory.for_graph_store()` static factory method to create a graph store.

The lexical-graph supports the following graph databases:

 - [Amazon Neptune](/graphrag-toolkit/lexical-graph/graph-store-neptune-db/)
 - [Amazon Neptune Analytics](/graphrag-toolkit/lexical-graph/graph-store-neptune-analytics/)
 - [Neo4j](/graphrag-toolkit/lexical-graph/graph-store-neo4j/)

#### Logging graph queries

By default, all graph queries in logs are redacted. To configure the toolkit to log queries and their results, use `NonRedactedGraphQueryLogFormatting` when creating a graph store:

```python
import os
from graphrag_toolkit.lexical_graph import set_logging_config
from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory
from graphrag_toolkit.lexical_graph.storage.graph import NonRedactedGraphQueryLogFormatting

set_logging_config('DEBUG', ['graphrag_toolkit.lexical_graph.storage.graph'])

graph_store = GraphStoreFactory.for_graph_store(
    os.environ['GRAPH_STORE'],
    log_formatting=NonRedactedGraphQueryLogFormatting()
)
```

### Vector store

A vector store is a collection of vector indexes. The lexical-graph uses up to two vector indexes: a chunk index and a statement index. The chunk index is typically much smaller than the statement index. If you want to use [semantic-guided search](/graphrag-toolkit/lexical-graph/semantic-guided-search/), you will need to enable the statement index. If you want to use [traversal-based search](/graphrag-toolkit/lexical-graph/traversal-based-search/), you will need to enable the chunk index. The `VectorStoreFactory` described below enables both indexes by default.

You use the `VectorStoreFactory.for_vector_store()` static factory method to create a vector store.

The lexical-graph supports the following vector stores:

 - [Amazon OpenSearch Serverless](/graphrag-toolkit/lexical-graph/vector-store-opensearch-serverless/)
 - [Amazon Neptune Analytics](/graphrag-toolkit/lexical-graph/vector-store-neptune-analytics/)
 - [Postgres with the pgvector extension](/graphrag-toolkit/lexical-graph/vector-store-postgres/)
 - [Amazon S3 Vectors](/graphrag-toolkit/lexical-graph/vector-store-s3-vectors/)

By default, the `VectorStoreFactory` will enable both the statement index and the chunk index. However, we recommend using traversal-based search, which requires only the chunk index.
Use the `index_names` argument to enable just the chunk index: + +```python +vector_store = VectorStoreFactory.for_vector_store(opensearch_connection_info, index_names=['chunk']) +``` diff --git a/docs-site/src/content/docs/lexical-graph/traversal-based-search-configuration.mdx b/docs-site/src/content/docs/lexical-graph/traversal-based-search-configuration.mdx new file mode 100644 index 00000000..a041106f --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/traversal-based-search-configuration.mdx @@ -0,0 +1,301 @@ +--- +title: Traversal-Based Search Configuration +--- + +### Topics + + - [Overview](#overview) + - [Search results configuration](#search-results-configuration) + - [max_search_results](#max_search_results) + - [max_statements_per_topic](#max_statements_per_topic) + - [max_statements](#max_statements) + - [statement_pruning_factor](#statement_pruning_factor) + - [statement_pruning_threshold](#statement_pruning_threshold) + - [When to use search results configuration](#when-to-use-search-results-configuration) + - [Retriever selection](#retriever-selection) + - [retrievers](#retrievers) + - [When to use different retrievers](#when-to-use-different-retrievers) + - [Reranking strategy](#reranking-strategy) + - [reranker](#reranker) + - [Choosing a reranker strategy](#choosing-a-reranker-strategy) + - [Troubleshooting reranking results](#troubleshooting-reranking-results) + - [Graph and vector search parameters](#graph-and-vector-search-parameters) + - [intermediate_limit](#intermediate_limit) + - [query_limit](#query_limit) + - [vss_top_k](#vss_top_k) + - [vss_diversity_factor](#vss_diversity_factor) + - [num_workers](#num_workers) + - [When to change the graph and vector search parameters](#when-to-change-the-graph-and-vector-search-parameters) + - [Entity network context selection](#entity-network-context-selection) + - [Entity network generation](#entity-network-generation) + - [ec_max_depth](#ec_max_depth) + - [ec_max_contexts](#ec_max_contexts) + - [ec_max_score_factor](#ec_max_score_factor) + - [ec_min_score_factor](#ec_min_score_factor) + - [When to adjust entity network generation](#when-to-adjust-entity-network-generation) + + + + +### Overview + +You can use the traversal-based search configuration options to customize traversal-based search operations to better suit your specific application, dataset, and query types. The following configuration options are available to help you optimize search performance: + + - [**Search results configuration**](#search-results-configuration) Adjust the number of search results and statements returned and set scoring thresholds to filter out low-quality statements and results + - [**Retriever selection**](#retriever-selection) Specify which retrievers to use when fetching information + - [**Reranking strategy**](#reranking-strategy) Modify how statements and results are reranked and sorted + - [**Graph and vector search parameters**](#graph-and-vector-search-parameters) Customize parameters that control graph queries and vector searches + - [**Entity network context selection**](#entity-network-context-selection) Configure parameters used to select entity network contexts + +These options allow you to fine-tune your search behavior based on your specific requirements and improve the relevance of returned results. 
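For orientation, the sketch below shows how options from several of these categories might be combined in a single call to `LexicalGraphQueryEngine.for_traversal_based_search()`. This is a minimal sketch: the parameter values are illustrative only, each parameter is described in the sections that follow, and `graph_store` and `vector_store` are assumed to have been created with the store factories shown elsewhere in this documentation.

```python
from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine
from graphrag_toolkit.lexical_graph.retrieval.retrievers import ChunkBasedSearch, EntityNetworkSearch

# Illustrative combination of configuration options; graph_store and
# vector_store are assumed to already exist.
query_engine = LexicalGraphQueryEngine.for_traversal_based_search(
    graph_store,
    vector_store,
    max_search_results=20,                               # search results configuration
    retrievers=[ChunkBasedSearch, EntityNetworkSearch],  # retriever selection
    reranker='tfidf',                                    # reranking strategy
    vss_top_k=10,                                        # graph and vector search parameters
    ec_max_contexts=3                                    # entity network context selection
)
```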
___

### Search results configuration

When configuring search functionality, you can use the following parameters to control the number and quality of returned results:

##### `max_search_results`

Defines the maximum number of search results to return. Each search result contains one or more statements that belong to the same topic (and source). If you set this to `None`, all matching search results will be returned. The default value is `10`.

##### `max_statements_per_topic`

Controls how many statements can be included with a single topic, effectively limiting the size of each search result. If set to `None`, all statements belonging to the topic that match the search will be included in the result. The default value is `10`.

##### `max_statements`

Limits the total number of statements across the entire result set. If you set this to `None`, all statements from all results will be returned. The default value is `100`.

##### `statement_pruning_factor`

This parameter helps filter out lower-quality statements based on a percentage of the highest statement score in the entire set of results. Any statement with a score less than `<maximum score> * statement_pruning_factor` will be removed from the results. The default value is `0.05` (5% of the maximum score).

##### `statement_pruning_threshold`

Sets an absolute minimum score threshold for statements. Any statement with a score lower than this threshold will be removed from the results. The default value is `None`.

#### Example

```python
query_engine = LexicalGraphQueryEngine.for_traversal_based_search(
    graph_store,
    vector_store,
    statement_pruning_threshold=0.2
)
```

#### When to use search results configuration

The `max_search_results`, `max_statements_per_topic` and `max_statements` parameters allow you to control the overall size of the results.

Each search result comprises one or more statements belonging to a single topic from a single source. Statements from the same source but different topics appear as separate search results. Increasing `max_search_results` increases the variety of sources in your results. Increasing `max_statements_per_topic` adds more detail to each individual search result.

When increasing the number of statements (either overall or per topic), you should consider increasing the statement pruning parameters as well. This helps ensure that even with larger result sets, you're still getting highly relevant statements rather than less relevant information.

___

### Retriever selection

You can use the `retrievers` parameter to configure traversal-based search with up to [three different retrievers](/graphrag-toolkit/lexical-graph/traversal-based-search/#retrievers).

##### `retrievers`

Accepts an array of retriever class names. Choose from:

 - **`ChunkBasedSearch`** This retriever uses a vector similarity search to find information that is similar to the original query. The retriever first finds relevant chunks using vector similarity search. From these chunks, the retriever traverses topics, statements, and facts. Chunk-based search tends to return a narrowly-scoped set of results based on the statement and fact neighbourhoods of chunks that match the original query.
 - **`EntityBasedSearch`** This retriever uses as its starting points the entities in an entity network context. From these entities, the retriever traverses facts, statements and topics.
Entity-based search tends to return a broadly-scoped set of results, based on the neighbourhoods of individual entities and the facts that connect entities. + - **`EntityNetworkSearch`** This retriever uses textual transcriptions of an entity network context to drive vector searches for information that is dissimilar to the original query but nonetheless structurally relevant for creating an accurate and full response. These vector searches return chunks that are similar to 'something different from the question being asked'. From these chunks, the retriever traverses topics, statements, and facts to explore the structurally relevant space of dissimilar content. + +#### Example + +```python +from graphrag_toolkit.lexical_graph.retrieval.retrievers import * + +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + retrievers=[ChunkBasedSearch, EntityBasedSearch] +) +``` + +#### When to use different retrievers + +By default, traversal-based search is configured to use a combination of `ChunkBasedSearch` and `EntityNetworkSearch`. This combination provides access to content that is both directly similar to the question and content that may be relevant but not explicitly mentioned in the query. + +Consider using the `ChunkBasedSearch` retriever by itself if: + + - Your queries need primarily similarity-based search + - You want to focus on individual relevant statements rather than entire chunks + - You need broader search scope than traditional vector search + +This retriever uses local connectivity to find relevant statements in other chunks from the same source, expanding beyond basic vector similarity. + +The `EntityBasedSearch` and `EntityNetworkSearch` retrievers provide different ways of utilising entity networks in a search: + + - The `EntityBasedSearch` uses global connectivity to find statements from different sources connected by the same facts. It often produces more diverse results than other retrievers. + - The `EntityNetworkSearch` retriever converts an entity network (retrieved through graph traversal) into a set of similarity searches. This approach balances global and local connectivity. + +___ + +### Reranking strategy + +Traversal-based search incorporates reranking at two key points during the retrieval process: + + - When generating entity network contexts, both entities and entity networks are reranked + - Before finalizing search results, the complete set of statements undergoes reranking + +Reranking is managed through a single parameter: + +##### `reranker` + +Parameters options: + + - `model`: Uses a LlamaIndex-based `SentenceReranker` to rerank all statements in the result set + - `tfidf` (default): Applies a term frequency-inverse document frequency measure to rank statements + - `None`: Disables the reranking feature completely + +The tfidf-based option is significantly faster than the model-based approach. To use the model reranker, you must first install the following additional dependencies: + +``` +pip install torch sentence_transformers +``` + +#### Example + +```python +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + reranker='model' +) +``` + +#### Choosing a reranker strategy + +The tfidf reranker option provides a fast, cost-effective, and generally effective solution for most use cases. However, if you find that the results don't meet your requirements, consider switching to the model reranker. 
Be aware that while model may provide different results, it operates significantly slower than tfidf and doesn't guarantee improved outcomes. + +##### Troubleshooting reranking results + +An effective reranking strategy should ensure that only highly relevant statements appear in your final results. For reranking to work properly, the relevant statements must first be captured by your retrievers before the reranking process begins. + +If your search results don't include content you expect to see, verify whether this content is present in the pre-ranked results by: + + 1. Disabling the reranker by setting `reranker=None` + 2. Increasing the following parameters in your [search results configuration](#search-results-configuration): + - [max_search_results](#max_search_results) + - [max_statements_per_topic](#max_statements_per_topic) + - [max_statements](#max_statements) + +After making these adjustments, review the results returned by the `retrieve()` operation. If the expected content still doesn't appear, the issue isn't related to reranking. Instead, consider other tuning approaches described elsewhere in the documentation, such as: + + - Changing your retriever configuration + - Adjusting pruning thresholds + - Configuring entity network contexts + +___ + +### Graph and vector search parameters + +These settings govern how the system queries both the graph and vector stores. When a user submits a query, multiple searches run across both stores, with some executing in parallel. The vector store returns the most similar items based on a top K approach. Results can be diversified across different sources. Graph store queries return statement sets, grouped by their source. Graph queries use a two-phase process: initial statement identification followed by connection exploration. + +##### `intermediate_limit` + +Controls how many statements are identified in the first phase of a graph query, before exploring their connections (both local and global). The default value is `50`. + +##### `query_limit` + +Defines how many results each graph query returns. Each result consists of statements from a single source. The default value is `10`. + +##### `vss_top_k` + +Specifies how many top matching results are used to begin similarity-based traversals. The default value is `10`. + +##### `vss_diversity_factor` + +Ensures results come from a diverse range of sources. Queries to a vector store retrieve (`vss_top_k × vss_diversity_factor`) initial matches, and then iteratively select the most relevant result from previously unused sources. This process continues until reaching `vss_top_k` total results. If set to `None`, simply returns the first `vss_top_k` matches. The default value is `5`. + +##### `num_workers` + +Sets the number of threads available for running graph queries in parallel. The default value is `10`. + +#### Example + +```python +query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + intermediate_limit=25, + num_workers=3 +) +``` + +#### When to change the graph and vector search parameters + +Whereas the [search results configuration](#search-results-configuration) parameters control the handling of the search results, the graph and vector store configuration parameters control the query processing used to generate the results. + +If your queries require finding highly diverse content from across multiple sources, increase the `vss_diversity_factor`. 
If your queries require content that derives directly from primary sources, reduce `vss_diversity_factor`, or set it to `None`.

If you experience out-of-memory issues while running user queries, reduce the `intermediate_limit` and `num_workers`. This will reduce the size of the working set for each graph query, and reduce the number of graph queries running in parallel.

If your application requires a large number of search results, you should consider increasing the `intermediate_limit`, `query_limit` and/or `vss_top_k`. Note that increasing these parameters can increase query latencies, and require more memory.

___

### Entity network context selection

The system creates focused [entity network contexts](/graphrag-toolkit/lexical-graph/traversal-based-search/#entity-network-contexts) based on the user's query terms. These contextual networks guide both retrieval and response generation phases.

#### Entity network generation

The process for generating entity network contexts is as follows:

 1. **Initial entity discovery** Match query terms to entities using various search methods: lookup by id, exact match, partial match, full text search, or any other search technique offered by the graph store.
 2. **Entity prioritization** Sort matched entities by relevance to the query. Calculate the degree centrality of the top entity: this will be used as a benchmark for subsequent filtering.
 3. **Network expansion** Starting from each root entity node, follow entity-to-entity relationships, expanding to a depth of 2-3 levels.
 4. **Network pruning** Apply filtering based on degree centrality thresholds derived from the benchmark created in step 2. Remove entities above and below these thresholds along each path.
 5. **Path selection** Rerank all valid paths and select the top N highest-ranking paths. These form the final set of entity network contexts.

You can configure entity network generation using the following parameters:

##### `ec_max_depth`

Determines the maximum number of entities in each entity network path.

The default value is `3`.

##### `ec_max_contexts`

Limits the number of entity contexts returned by providers. Note: Multiple entity contexts may originate from the same root entity. The default value is `3`.

##### `ec_max_score_factor`

Filters out entities whose degree centrality exceeds a threshold based on a percentage of the degree centrality of the top entity. The default value is `10` (1000% of the top entity's score).

##### `ec_min_score_factor`

Filters out entities whose degree centrality falls below a threshold based on a percentage of the degree centrality of the top entity. The default value is `0.1` (10% of the top entity's score).

#### Example

```python
query_engine = LexicalGraphQueryEngine.for_traversal_based_search(
    graph_store,
    vector_store,
    ec_max_depth=3,
    ec_max_contexts=3
)
```

#### When to adjust entity network generation

The entity network context settings control how extensively the system searches for related content and how it filters results based on entity relationships. Increase the search scope to find structurally relevant but dissimilar content. Reduce the search scope to focus on content similar to the query.

A **broad but shallow search** – e.g. `ec_max_depth=1` and `ec_max_contexts=5` – helps explore diverse contexts focused on direct matches to the query.

A **deep but narrow search** – e.g.
`ec_max_depth=3` and `ec_max_contexts=2` – helps explore distantly related content through key entities.

The `ec_max_score_factor` and `ec_min_score_factor` parameters allow you to filter out 'whales' and 'minnows' in proportion to the significance of the top entity.

`ec_max_score_factor` controls how prominently high-scoring distant entities appear in the search results. Higher values will include well-connected entities even if they're distantly related. Increase `ec_max_score_factor` when you want to see important entities that aren't directly connected.

`ec_min_score_factor` controls the inclusion of less significant distant entities. Lower values will result in the inclusion of rarely mentioned entities even if they're distantly related. Decrease `ec_min_score_factor` to find niche or uncommon connections.
diff --git a/docs-site/src/content/docs/lexical-graph/traversal-based-search.mdx b/docs-site/src/content/docs/lexical-graph/traversal-based-search.mdx
new file mode 100644
index 00000000..7f1e5fa8
--- /dev/null
+++ b/docs-site/src/content/docs/lexical-graph/traversal-based-search.mdx
@@ -0,0 +1,267 @@
---
title: Traversal-Based Search
---

### Topics

 - [Overview](#overview)
 - [Example](#example)
 - [Basic concepts](#basic-concepts)
 - [Connectivity types](#connectivity-types)
 - [Entity network contexts](#entity-network-contexts)
 - [Retrievers](#retrievers)
 - [Search results](#search-results)

### Overview

The recommended method for query and retrieval is to use the traversal-based search operation. While the lexical-graph does include support for semantic-guided search, this alternative approach has several significant drawbacks:

 - High storage costs due to requiring an embedding for each statement
 - Poor performance with large datasets, with queries often taking minutes to complete
 - Expected to be removed in future releases

For optimal results, users should use traversal-based search in their applications.

Traversal-based search can be used in two ways: retrieval and querying. When you perform a retrieval operation, the system searches the graph and vector stores to find the most relevant information related to your query. It then returns these raw search results directly to you. With a query operation, the system takes an extra step. After finding the relevant information, it passes these results to a Large Language Model (LLM). The LLM processes this information and generates a natural language response that answers your query.
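As a minimal sketch of the difference between the two modes (assuming a `query_engine` created with `LexicalGraphQueryEngine.for_traversal_based_search()`, as in the example that follows, but without `streaming=True`):

```python
# Retrieval only: returns the raw search results as LlamaIndex scored nodes
# (NodeWithScore) without invoking an LLM.
results = query_engine.retrieve("What are the differences between Neptune Database and Neptune Analytics?")
for node in results:
    print(node.text)

# Querying: retrieves the same kind of results, then passes them to an LLM,
# which generates a natural language answer.
response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?")
print(response.response)
```

The structure of the scored nodes returned by `retrieve()` is described in [Search results](#search-results) below.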
+ +### Example + +The following example performs a traversal-based search using the default settings: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store( + 'neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com' + ) as graph_store, + VectorStoreFactory.for_vector_store( + 'aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com' + ) as vector_store +): + + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + streaming=True + ) + + response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?") + +print(response.print_response_stream()) +``` + +The parameters used to configure traversal-based search are described in [Traversal-Based Search Configuration](/graphrag-toolkit/lexical-graph/traversal-based-search-configuration/). + +### Basic concepts + +Traversal-based search is a method that employs one or more retrievers to locate information within a lexical graph. This approach leverages two key features of the lexical graph structure: connectivity (both local and global) and entity network contexts. + +#### Connectivity types + +The lexical graph provides both local and global connectivity: + + - *Local Connectivity* Local connectivity enables traversal within a localized network, typically within a single source. This is primarily facilitated by topics, which connect relevant chunks of information within the same source material. + - *Global Connectivity* Global connectivity allows navigation to related components that may be more distant in the graph structure. This is achieved through facts, which create connections across different sources. + +Different retrievers emphasize these connectivity types in varying ways: + + - The `ChunkBasedSearch` retriever primarily utilizes local connectivity + - The `EntityBasedSearch` retriever focuses more on global connectivity + - The `EntityNetworkSearch` retriever balances local and global connectivity + +#### Entity network contexts + +An entity network context consists of a filtered and ranked network of entities that relate to search terms found in the user's query. These contexts serve multiple important functions: + + - *Search Initialization* Provides starting points for entity-based searches in the `EntityBasedSearch` retriever + - *Similarity Searching* Entity network transcriptions – textual representations of the entity network contexts – help find content that differs from but relates to the original query in the `EntityNetworkSearch` retriever + - *Reranking* Entity network transcriptions can be used to enhance the original search terms when reranking statements in search results + - *LLM Integration* Entity network transcriptions can also be provided to Large Language Models (LLMs) during query operations to help focus responses on the most relevant search results + +### Retrievers + +Traversal-based search provides three different retrievers: + + - The `ChunkBasedSearch` retriever uses a vector similarity search to find information that is similar to the original query. The retriever first finds relevant chunks using vector similarity search. From these chunks, the retriever traverses topics, statements, and facts. 
Chunk-based search tends to return a narrowly-scoped set of results based on the statement and fact neighbourhoods of chunks that match the original query. + - The `EntityBasedSearch` retriever uses as its starting points the entities in an entity network context. From these entities, the retriever traverses facts, statements and topics. Entity-based search tends to return a broadly-scoped set of results, based on the neighbourhoods of individual entities and the facts that connect entities. + - The `EntityNetworkSearch` retriever uses textual transcriptions of an entity network context to drive vector searches for information that is dissimilar to the original query but nonetheless structurally relevant for creating an accurate and full response. These vector searches return chunks that are similar to 'something different from the question being asked'. From these chunks, the retriever traverses topics, statements, and facts to explore the structurally relevant space of dissimilar content. + +By default, the traversal-based search is configured to use a combination of `ChunkBasedSearch` and `EntityNetworkSearch`. Together, these two retrievers provide access to content that is similar to the question being asked, plus content that is similar to 'something different from the question being asked'. + +### Search results + +When used with traversal-based search, the `retrieve()` operation of the `LexicalGraphQueryEngine` returns a collection of LlamaIndex scored nodes (`NodeWithScore`). Each node contains a single search result, comprising a source, topic, and a set of statements. For example, + +```python +response = query_engine.query("What are the differences between Neptune Database and Neptune Analytics?") + +for n in response.source_nodes: + print(n.text) +``` + + – returns the following output: + +``` +{ + "source": "https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html", + "topic": "Neptune Analytics Features", + "statements": [ + "Neptune Analytics allows loading graph data from a Neptune Database endpoint.", + "Neptune Analytics enables running graph analytics queries.", + "Neptune Analytics allows loading graph data from Amazon S3.", + "Neptune Analytics supports custom graph queries.", + "Neptune Analytics supports pre-built graph queries." + ] +} +{ + ... +} +``` + +The `metadata` property of each node contains a dictionary with a far more detailed breakdown of the search result. This includes the score for each statement, the facts that support each statement, the retrievers used to fetch each statement, and the entity network contexts used in the query. 
For example, + +```python +import json +for n in response.source_nodes: + print(json.dumps(n.metadata, indent=2)) +``` + + – returns the following output: + +``` +{ + "result": { + "source": { + "sourceId": "aws::4510583f:e412", + "metadata": { + "url": "https://docs.aws.amazon.com/neptune-analytics/latest/userguide/neptune-analytics-features.html" + } + }, + "topics": [ + { + "topic": "Neptune Analytics Features", + "topicId": "fbbde2f69acd195da90e578d0f9eeefe", + "statements": [ + { + "statementId": "810a8ac6943708e1584662b32431eb67", + "statement": "Neptune Analytics allows loading graph data from a Neptune Database endpoint.", + "facts": [ + "Neptune Analytics FEATURE loading graph data", + "Neptune Analytics SUPPORTS LOADING FROM Neptune Database" + ], + "details": "", + "chunkId": "aws::4510583f:e412:9f69cb6f", + "score": 0.3187, + "statement_str": "Neptune Analytics allows loading graph data from a Neptune Database endpoint. (details: Neptune Analytics FEATURE loading graph data, Neptune Analytics SUPPORTS LOADING FROM Neptune Database)", + "retrievers": [ + "ChunkBasedSearch (3.12.0)" + ] + }, + { + "statementId": "797021c7c33db8674fa0be42a1cdd9a6", + "statement": "Neptune Analytics enables running graph analytics queries.", + "facts": [ + "Neptune Analytics FEATURE running graph analytics queries" + ], + "details": "", + "chunkId": "aws::4510583f:e412:9f69cb6f", + "score": 0.2233, + "statement_str": "Neptune Analytics enables running graph analytics queries. (details: Neptune Analytics FEATURE running graph analytics queries)", + "retrievers": [ + "ChunkBasedSearch (3.12.0)" + ] + }, + { + "statementId": "23deac383344021ed50e1c78448408a8", + "statement": "Neptune Analytics allows loading graph data from Amazon S3.", + "facts": [ + "Neptune Analytics FEATURE loading graph data", + "Neptune Analytics SUPPORTS LOADING FROM Amazon S3" + ], + "details": "", + "chunkId": "aws::4510583f:e412:9f69cb6f", + "score": 0.2197, + "statement_str": "Neptune Analytics allows loading graph data from Amazon S3. (details: Neptune Analytics FEATURE loading graph data, Neptune Analytics SUPPORTS LOADING FROM Amazon S3)", + "retrievers": [ + "ChunkBasedSearch (3.12.0)" + ] + }, + { + "statementId": "85a4ea712a9a83fb4ac7f441be72e694", + "statement": "Neptune Analytics supports custom graph queries.", + "facts": [ + "Neptune Analytics FEATURE custom graph queries" + ], + "details": "", + "chunkId": "aws::4510583f:e412:9f69cb6f", + "score": 0.199, + "statement_str": "Neptune Analytics supports custom graph queries. (details: Neptune Analytics FEATURE custom graph queries)", + "retrievers": [ + "ChunkBasedSearch (3.12.0)" + ] + }, + { + "statementId": "3a480d6a686748a628009de3cd8238ed", + "statement": "Neptune Analytics supports pre-built graph queries.", + "facts": [ + "Neptune Analytics FEATURE pre-built graph queries" + ], + "details": "", + "chunkId": "aws::4510583f:e412:9f69cb6f", + "score": 0.1857, + "statement_str": "Neptune Analytics supports pre-built graph queries. 
(details: Neptune Analytics FEATURE pre-built graph queries)", + "retrievers": [ + "ChunkBasedSearch (3.12.0)" + ] + } + ] + } + ] + }, + "entity_contexts": { + "contexts": [ + { + "entities": [ + { + "entity": { + "entityId": "19ad98dc563a3a3c935d93723d3c9029", + "value": "Neptune Analytics", + "classification": "Software" + }, + "score": 37.0, + "reranking_score": 0.5025 + }, + { + "entity": { + "entityId": "ecc28e0aba278f8803bfbc5ae162831a", + "value": "Neptune", + "classification": "Software" + }, + "score": 10.0, + "reranking_score": 0.0 + } + ] + }, + { + "entities": [ + { + "entity": { + "entityId": "51874c430e9cb1f5b09d790049d5380d", + "value": "Neptune Database", + "classification": "Software" + }, + "score": 5.0, + "reranking_score": 0.5025 + } + ] + } + ] + } +} +{ + ... +} +``` diff --git a/docs-site/src/content/docs/lexical-graph/vector-store-neptune-analytics.mdx b/docs-site/src/content/docs/lexical-graph/vector-store-neptune-analytics.mdx new file mode 100644 index 00000000..c853f787 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/vector-store-neptune-analytics.mdx @@ -0,0 +1,27 @@ +--- +title: Neptune Analytics Vector Store +--- + +### Topics + + - [Overview](#overview) + - [Creating a Neptune Analytics vector store](#creating-a-neptune-analytics-vector-store) + +### Overview + +You can use Amazon Neptune Analytics as a vector store. + +### Creating a Neptune Analytics vector store + +Use the `VectorStoreFactory.for_vector_store()` static factory method to create an instance of an Amazon Neptune Analytics vector store. + +To create a Neptune Analytics vector store, supply a connection string that begins `neptune-graph://`, followed by the graph's identifier: + +```python +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +neptune_connection_info = 'neptune-graph://g-jbzzaqb209' + +with VectorStoreFactory.for_vector_store(neptune_connection_info) as vector_store: + ... +``` diff --git a/docs-site/src/content/docs/lexical-graph/vector-store-opensearch-serverless.mdx b/docs-site/src/content/docs/lexical-graph/vector-store-opensearch-serverless.mdx new file mode 100644 index 00000000..87f04334 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/vector-store-opensearch-serverless.mdx @@ -0,0 +1,129 @@ +--- +title: OpenSearch Serverless Vector Store +--- + +### Topics + + - [Overview](#overview) + - [Install dependencies](#install-dependencies) + - [Creating an OpenSearch Serverless vector store](#creating-an-opensearch-serverless-vector-store) + - [Amazon OpenSearch Serverless and custom document IDs](#amazon-opensearch-serverless-and-custom-document-ids) + - [Verify and repair an Amazon OpenSearch Serverless vector store](#verify-and-repair-an-amazon-opensearch-serverless-vector-store) + +### Overview + +You can use an Amazon OpenSearch Serverless collection as a vector store. + +### Install dependencies + +The OpenSeacrh vector store requires both the `opensearch-py` and `llama-index-vector-stores-opensearch` packages: + +``` +pip install opensearch-py llama-index-vector-stores-opensearch +``` + +### Creating an OpenSearch Serverless vector store + +Use the `VectorStoreFactory.for_vector_store()` static factory method to create an instance of an Amazon OpenSearch Serverless vector store. 
To create an Amazon OpenSearch Serverless vector store, supply a connection string that begins `aoss://`, followed by the https endpoint of the OpenSearch Serverless collection:

```python
from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory

opensearch_connection_info = 'aoss://https://123456789012.us-east-1.aoss.amazonaws.com'

with VectorStoreFactory.for_vector_store(opensearch_connection_info) as vector_store:
    ...
```

### Amazon OpenSearch Serverless and custom document IDs

Amazon OpenSearch Serverless vector search collections do not allow documents to be indexed by a custom document ID, or updated by upsert requests. Internally, Amazon OpenSearch Serverless creates a unique document ID for each index action. This means that if the same document is indexed twice, there will be two separate entries in a collection.

Version 3.10.3 of the toolkit introduces a step into the bulk indexing process that checks whether a document has already been indexed. If it has, the process ignores the request to (re)index that particular document. Further, if the check determines that the document has already been indexed multiple times in the vector store, it deletes the redundant copies from the store.

#### Verify and repair an Amazon OpenSearch Serverless vector store

Version 3.10.3 also introduces a [command-line tool](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/scripts/repair_opensearch_vector_store.py) that you can use to verify and repair an Amazon OpenSearch Serverless vector store. Download [repair_opensearch_vector_store.py](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/scripts/repair_opensearch_vector_store.py) and run the following command:

```
$ python repair_opensearch_vector_store.py --graph-store <graph-store-connection-info> --vector-store <vector-store-connection-info> --dry-run
```

The `--dry-run` flag above allows you to run the tool and see what repairs are necessary without actually modifying the indexes. Remove the `--dry-run` flag to repair (delete duplicate documents from) the vector store.
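For example, a dry-run invocation against the example stores used elsewhere in this documentation might look like the following (the connection values are illustrative only):

```
$ python repair_opensearch_vector_store.py \
    --graph-store neptune-db://my-graph.cluster-abcdefghijkl.us-east-1.neptune.amazonaws.com \
    --vector-store aoss://https://abcdefghijkl.us-east-1.aoss.amazonaws.com \
    --dry-run
```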
+ +The tool has the following parameters: + +| Parameter | Description | Mandatory | Default | +| ------------- | ------------- | ------------- | ------------- | +| `--graph-store` | Graph store connection info (for example `neptune-db://mydbcluster.cluster-123456789012.us-east-1.neptune.amazonaws.com:8182`) | Yes | – | +| `--vector-store` | Vector store connection info (for example `aoss://https://123456789012.us-east-1.aoss.amazonaws.com`) | Yes | – | +| `--tenant-ids` | Space-separated list of tenant ids to check | No | All tenants | +| `--batch-size` | Number of OpenSearch documents to check with each request to OpenSearch | No | 1000 | +| `--dry-run` | Verify the store, but do not repair (delete any duplicates from) the store | No | Tool deletes duplicate documents from the vector store | + +The tool returns results in the following format: + +``` +{ + "duration_seconds": 16, + "dry_run": false, + "totals": { + "total_node_ids": 15354, + "total_doc_ids": 15354, + "total_deleted_doc_ids": 0, + "total_unindexed": 0 + }, + "results": [ + { + "tenant_id": "default_", + "index": "chunk", + "num_nodes": 17, + "num_docs": 17, + "num_deleted": 0, + "num_unindexed": 0 + }, + { + "tenant_id": "default_", + "index": "statement", + "num_nodes": 211, + "num_docs": 211, + "num_deleted": 0, + "num_unindexed": 0 + }, + { + "tenant_id": "local", + "index": "chunk", + "num_nodes": 1, + "num_docs": 1, + "num_deleted": 0, + "num_unindexed": 0 + }, + { + "tenant_id": "local", + "index": "statement", + "num_nodes": 26, + "num_docs": 26, + "num_deleted": 0, + "num_unindexed": 0 + } + ] +} +``` + +Field descriptions: + +| Field | Description | +| ------------- | ------------- | +| `dry_run` | `true` - Duplicate docs not actually deleted from vector store (the number of deleted docs in the results are indicative of the numbers that would have been deleted); `false` - Duplicate docs will have been deleted from the vector store. | +| `total_node_ids` | Total number of indexable nodes in the graph | +| `total_doc_ids` | Total number of documents in the vector store | +| `total_deleted_doc_ids` | Total number of documents deleted from vector store (indicative number only if `dry_run` is `true`) | +| `total_unindexed` | Total number of nodes that have not been indexed | +| `tenant_id` | Tenant id (the default tenant is `default_`) | +| `index` | Index name | +| `num_nodes` | Number of indexable nodes in a specific tenant graph | +| `num_docs` | Number of documents in a specific tenant vector index | +| `num_deleted` | Number of documents deleted from a specific tenant vector index (indicative number only if `dry_run` is `true`) | +| `num_unindexed` | Number of nodes that have not been indexed in a specific tenant vector index | diff --git a/docs-site/src/content/docs/lexical-graph/vector-store-postgres.mdx b/docs-site/src/content/docs/lexical-graph/vector-store-postgres.mdx new file mode 100644 index 00000000..2f07724b --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/vector-store-postgres.mdx @@ -0,0 +1,48 @@ +--- +title: Postgres Vector Store +--- + +### Topics + + - [Overview](#overview) + - [Install dependencies](#install-dependencies) + - [Creating Postgres vector store](#creating-a-postgres-vector-store) + - [Connecting to an IAM auth-enabled Postgres vector store](#connecting-to-an-iam-auth-enabled-postgres-vector-store) + +### Overview + +You can use a Postgres database with the [pgvector](https://github.com/pgvector/pgvector) extension as a vector store. 
+ +### Install dependencies + +The Postgres vector store requires both the `psycopg2` and `pgvector` packages: + +``` +pip install psycopg2-binary pgvector +``` + +### Creating a Postgres vector store + +Use the `VectorStoreFactory.for_vector_store()` static factory method to create an instance of a Postgres vector store. + +To create a Postgres vector store, supply a connection string in the following format: + +``` +postgresql://[user[:password]@][netloc][:port][/dbname][?param1=value1&...] +``` + +For example: + +``` +postgresql://graphrag:!zfg%dGGh@mydbcluster.cluster-123456789012.us-west-2.rds.amazonaws.com:5432/postgres +``` + +#### Connecting to an IAM auth-enabled Postgres vector store + +If your Postgres database supports [AWS Identity and Access Management (IAM) database authentication](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html), omit the password, and add `enable_iam_db_auth=True` to the connection string query parameters: + +``` +postgresql://graphrag@mydbcluster.cluster-123456789012.us-west-2.rds.amazonaws.com:5432/postgres?enable_iam_db_auth=True +``` + +You will need to create a database user, and [grant the `rds_iam` role](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.DBAccounts.html#UsingWithRDS.IAMDBAuth.DBAccounts.PostgreSQL) to use IAM authentication. diff --git a/docs-site/src/content/docs/lexical-graph/vector-store-s3-vectors.mdx b/docs-site/src/content/docs/lexical-graph/vector-store-s3-vectors.mdx new file mode 100644 index 00000000..1a92dc88 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/vector-store-s3-vectors.mdx @@ -0,0 +1,96 @@ +--- +title: S3 Vectors Vector Store +--- + +### Topics + + - [Overview](#overview) + - [Creating an S3 Vectors vector store](#creating-an-s3-vectors-vector-store) + - [Connection string parameters](#connection-string-parameters) + - [IAM permissions required to use Amazon S3 Vectors as a vector store](#iam-permissions-required-to-use-amazon-s3-vectors-as-a-vector-store) + - [Indexing](#indexing) + - [Querying](#querying) + +### Overview + +You can use [Amazon S3 Vectors](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors.html) as a vector store. + +### Creating an S3 Vectors vector store + +Use the `VectorStoreFactory.for_vector_store()` static factory method to create an instance of an Amazon S3 Vectors vector store. + +To create an Amazon S3 Vectors store, supply a connection string in the following format: + +``` +s3vectors://[/] +``` + +For example: + +```python +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +s3_vectors_connection_info = 's3vectors://my-s3-vectors-bucket/app1' + +with VectorStoreFactory.for_vector_store(s3_vectors_connection_info) as vector_store: + ... +``` + +#### Connection string parameters + +The connection string includes two parameters: + +##### `bucket_name` + +Mandatory. Name of an Amazon S3 vector bucket in the same AWS Region as the application running the graphrag-toolkit. If the vector bucket does not already exist, the indexing process will create a new bucket. + +##### `index_prefix` + +Optional. Prefix to be attached to the name of each index created by the indexing process. Prefixes allow you to store indexes created by different graphrag-toolkit applications in the same vector bucket. 
+ +Imagine an application with two [tenants](/graphrag-toolkit/lexical-graph/multi-tenancy/) - the default tenant, and an `admin` tenant - and a connection to an S3 Vectors vector store that uses the following connection string: + +``` +s3vectors://my-s3-vectors-bucket +``` + +Because the vector store connection string is configured with a bucket name only, the application will create the following chunk indexes: + + - `chunk` + - `chunk-admin` + +If the connection string includes a prefix, like this - + +``` +s3vectors://my-s3-vectors-bucket/app1 +``` + +the application will create the following chunk indexes: + + - `app1.chunk` + - `app1.chunk-admin` + +### IAM permissions required to use Amazon S3 Vectors as a vector store + +#### Indexing + +The identity under which the graphrag-toolkit's indexing process runs requires the following IAM permissions: + + - `s3Vectors:GetVectorBucket` + - `s3Vectors:CreateVectorBucket` + - `s3Vectors:GetIndex` + - `s3Vectors:CreateIndex` + - `s3Vectors:DeleteVectors` + - `s3Vectors:GetVectors` + - `s3Vectors:PutVectors` + +#### Querying + +The identity under which the graphrag-toolkit's querying process runs requires the following IAM permissions: + + - `s3Vectors:GetVectorBucket` + - `s3Vectors:GetIndex` + - `s3Vectors:QueryVectors` + - `s3Vectors:GetVectors` + +See [Identity and Access management in S3 Vectors](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-vectors-access-management.html) for more details on AWS security best practices for S3 Vectors. diff --git a/docs-site/src/content/docs/lexical-graph/versioned-updates.mdx b/docs-site/src/content/docs/lexical-graph/versioned-updates.mdx new file mode 100644 index 00000000..3aa1f342 --- /dev/null +++ b/docs-site/src/content/docs/lexical-graph/versioned-updates.mdx @@ -0,0 +1,718 @@ +--- +title: Versioned Updates +--- + +### Topics + + - [Overview](#overview) + - [Document subgraphs](#document-subgraphs) + - [Stable document identities](#stable-document-identities) + - [Using versioned updates](#using-versioned-updates) + - [Indexing](#indexing) + - [Querying](#querying) + - [Combining versioned updates with metadata filtering](#combining-versioned-updates-with-metadata-filtering) + - [Example](#example) + - [Inspecting source metadata](#inspecting-source-metadata) + - [Get details of all source nodes](#get-details-of-all-source-nodes) + - [Get details of all current source nodes](#get-details-of-all-current-source-nodes) + - [Get details of all previous source nodes](#get-details-of-all-previous-source-nodes) + - [Get details of previous versions of files with specific metadata](#get-details-of-previous-versions-of-files-with-specific-metadata) + - [Deleting documents](#deleting-documents) + - [Deleting documents by source id](#deleting-documents-by-source-id) + - [Deleting all previous versions of a document](#deleting-all-previous-versions-of-a-document) + - [Deleting a document with version-specific metadata](#deleting-a-document-with-version-specific-metadata) + - [Automatically delete versioned documents](#automatically-delete-versioned-documents) + - [Implementing deletion protection](#implementing-deletion-protection) + - [Upgrading existing graph and vector stores](#upgrading-existing-graph-and-vector-stores) + - [Upgrading specific tenants](#upgrading-specific-tenants) + - [Upgrading specific vector indexes](#upgrading-specific-vector-indexes) + +### Overview + +The graphrag-toolkit allows you to version source documents along a single timeline based on _extraction_ timestamps. 
Using this versioned update feature, if you re-ingest a document whose contents and/or metadata have changed since it was last extracted, any old documents will be archived, and the newly ingested document treated as the current version of the source document. + +### Document subgraphs + +The `(source)<--(chunk)<--(topic)<--(statement)` part of the lexical graph model represents a bounded document subgraph. The id of a source node is a function of the metadata and textual contents of a source document. The ids of chunks, topics, and statements are in turn a function of the source id. If the metadata and/or contents of a source document change, and the document is reprocessed, the source will be assigned a different id – and so will all the chunks, topics and statements that derive from that source. + +![Versionable Subgraph](../../../content/images/versionable-unit.png) + +This means that if you extract two different versions of a document (i.e. versions with different contents and/or metadata) at different times, you'll end up with two different bounded document subgraphs: two source nodes, and then independent `(chunk)<--(topic)<--(statement)` subgraphs beneath each of those source nodes. If the toolkit's versioning feature is enabled, the last version of the document to be extracted will be treated as the current version, and all other versions marked as historical, archived versions. + +#### Stable document identities + +For a document to be versioned in this manner, there must be some way of specifying that different sets of text and metadata represent _different_ versions of the _same_ document. In other words, the document must have a stable identity, independent of variations in content and/or metadata. + +The graphrag-toolkit uses a concept of _version-independent metadata fields_ to represent this stable identity. When you index a document, you can specify which of that document's metadata fields represent its stable identity. For example, if a document has `title`, `author` and `last_updated` metadata fields, you might specify that a combination of the `title` and `author` metadata fields represents that document's stable identity. When the document is indexed, any previously indexed, non-versioned documents whose `title` and `author` field _values_ match those of the newly ingested document will be archived. + +> **Important** Which metadata fields you choose to represent different documents' stable identities will have a big impact on the versioning of documents. A set of specific version-independent field values should match all versions of a specific document, without including any other documents. A URI is often sufficient to uniquely identify a web page, whereas a filename may not always uniquely identify a file – there are lots of files named `readme.md`, for example. If a set of version-independent metadata fields is too permissive, you risk versioning – or worse, deleting – the wrong documents. If in doubt, consider adding a synthetic document id metadata field to each document that you index. + +### Using versioned updates + +If you have an existing graph and vector store built by a version of the graphrag-toolkit prior to version 3.14, you will need to upgrade them first. See [Upgrading existing graph and vector stores](#upgrading-existing-graph-and-vector-stores). + +#### Indexing + +Indexed documents are versioned based on _extraction_ timestamps. A document will be `valid_from` the timestamp when it was extracted.
If a different version of the document is subsequently indexed, the old version will be considered `valid_to` the extraction timestamp of the new version. + +When _extracting_ data (using `LexicalGraphIndex.extract()` or `LexicalGraphIndex.extract_and_build()`), you must add the names of _version-independent metadata fields_ to the metadata of each document you want to update and version. Use the `add_versioning_info` helper to do this ([`versioning.py:35`](https://github.com/awslabs/graphrag-toolkit/blob/main/lexical-graph/src/graphrag_toolkit/lexical_graph/versioning.py#L35)): + +```python +from graphrag_toolkit.lexical_graph import add_versioning_info + +metadata = add_versioning_info( + metadata={}, + id_fields=['url'], # metadata fields that together identify this document across versions + valid_from=1761899971000 # optional: unix timestamp (ms) when this version became valid +) +``` + +Both `id_fields` and `valid_from` are optional. `id_fields` accepts a string or a list of strings. + +When _building_ a lexical graph (using `LexicalGraphIndex.build()` or `LexicalGraphIndex.extract_and_build()`), you must enable versioning, using either the `GraphRAGConfig.enable_versioning=True` global configuration parameter, or by passing a `BuildConfig(enable_versioning=True)` configuration object to the `LexicalGraphIndex` constructor, or by passing `enable_versioning=True` to the `LexicalGraphIndex.build()` or `LexicalGraphIndex.extract_and_build()` methods. + +The presence of `enable_versioning=True` forces the build process to check for previous versions of each document, as identified by the version-independent metadata fields supplied in the extract stage. + +The following example uses `LexicalGraphIndex.extract_and_build()` to extract data from a local directory and build a lexical graph. The `get_file_metadata()` function wraps the metadata produced by the `default_file_metadata_func()` to indicate that the `file_name` and `file_path` metadata fields together act as a version-independent identifier: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex, GraphRAGConfig +from graphrag_toolkit.lexical_graph import add_versioning_info +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +from llama_index.core import SimpleDirectoryReader +from llama_index.core.readers.file.base import default_file_metadata_func + +GraphRAGConfig.enable_versioning = True + +def get_file_metadata(file_path): + metadata = default_file_metadata_func(file_path) + return add_versioning_info(metadata, id_fields=['file_name', 'file_path']) + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + reader = SimpleDirectoryReader(input_dir='./my_docs/', file_metadata=get_file_metadata) + + docs = reader.load_data() + graph_index.extract_and_build(docs) +``` + +The example above uses `GraphRAGConfig.enable_versioning = True` to force versioning checks during the build stage. 
Alternatively, you can supply a `BuildConfig` object: + +```python +from graphrag_toolkit.lexical_graph import LexicalGraphIndex, BuildConfig + +graph_index = LexicalGraphIndex( + graph_store, + vector_store, + indexing_config=BuildConfig(enable_versioning=True) +) +``` + +Or you can pass an `enable_versioning=True` keyword argument to the build method: + +```python +graph_index.extract_and_build( + docs, + enable_versioning=True +) +``` + +##### Do I need to specify id fields for every document? + +No. You only need to specify the names of version-independent metadata fields for documents you wish to update and version. You don't have to specify these fields the first time you index a document (though you can), only when you re-index a document. + +If you anticipate using the versioned updates feature, you should take care to ensure that all documents that might be updated and versioned in the future carry metadata fields that can act as version-independent metadata fields. You cannot add to or modify the metadata attached to a source document once it has been added to the lexical graph. This means you should plan ahead when ingesting data that might be versioned in the future. + +#### Querying + +To take advantage of the versioned updates feature when querying a lexical graph, you must use either the `GraphRAGConfig.enable_versioning=True` global configuration parameter or the `versioning` keyword argument when creating a `LexicalGraphQueryEngine`. The `versioning` keyword argument accepts either a boolean, or a `VersioningConfig` object. The latter allows you to specify an historical timestamp, so that you can query the state of the graph at a particular point in time. + +If you do not specify that versioning is to be used, the query engine will generate a response that ignores all versioning information in the lexical graph. 
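+ +As a quick reference, the sketch below summarizes the three ways of enabling versioning at query time that are shown in the examples that follow (the timestamp value is illustrative only): + +```python +from graphrag_toolkit.lexical_graph import GraphRAGConfig, VersioningConfig + +# Option 1: enable versioning globally, for every query engine created afterwards +GraphRAGConfig.enable_versioning = True + +# Option 2: enable versioning for a single query engine +# LexicalGraphQueryEngine.for_traversal_based_search(..., versioning=True) + +# Option 3: query the state of the graph as it was at a specific point in time +# LexicalGraphQueryEngine.for_traversal_based_search(..., versioning=VersioningConfig(at_timestamp=1761899971500)) +```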
+ +The following example uses `GraphRAGConfig.enable_versioning = True` to query against the current state of the lexical graph: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine, GraphRAGConfig +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +GraphRAGConfig.enable_versioning = True + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store + ) + + response = query_engine.query('Which instance families are available for Amazon Neptune?') +``` + +The next example uses the `versioning=True` keyword argument supplied to a `LexicalGraphQueryEngine` to query against the current state of the lexical graph: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + versioning=True + ) + + response = query_engine.query('Which instance families are available for Amazon Neptune?') +``` + +The following example uses a `VersioningConfig` object to query against the historical state of the lexical graph at a specific point in time: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine, GraphRAGConfig, VersioningConfig +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +GraphRAGConfig.enable_versioning = True + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + versioning=VersioningConfig(at_timestamp=1761899971500) + ) + + response = query_engine.query('Which instance families are available for Amazon Neptune?') +``` + +#### Combining versioned updates with metadata filtering + +You can combine versioned updates with metadata filtering. Metadata filtering allows you to filter documents based on domain-specific metadata that you control; versioning allows you to filter documents based on their history along the extraction timeline. The following example combines a versioned query with a metadata filter on a `url` field:
+ +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphQueryEngine +from graphrag_toolkit.lexical_graph.metadata import FilterConfig +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from llama_index.core.vector_stores.types import FilterOperator, MetadataFilter + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + query_engine = LexicalGraphQueryEngine.for_traversal_based_search( + graph_store, + vector_store, + versioning=True, + filter_config=FilterConfig( + MetadataFilter( + key='url', + value='https://docs.aws.amazon.com/neptune/latest/userguide/intro.html', + operator=FilterOperator.EQ + ) + ) + ) + + response = query_engine.query('What are the differences between Neptune Database and Neptune Analytics?') +``` + +### Example + +The following diagram shows four rounds of extraction: + +![Versioning](../../../content/images/versioning-1.png) + +Documents are inserted and versioned in the following order: + +| extraction timestamp | source id | metadata | version-independent fields | replaces | +| --- | --- | --- | --- | --- | +| 1761899971000 | s1 | `{'doc_id': 'D1', 'revision': 1}` | | | +| | s2 | `{'title': 'T1', 'app': 'app_01', 'month': '06'}` | | | +| | s3 | `{'url': 'http://xyz', 'accessed': 'Mon'}` | | | +| 1761899972000 | s4 | `{'title': 'T1', 'app': 'app_01', 'month': '07'}` | `['title', 'app']` | s2 | +| | s5 | `{'url': 'http://xyz', 'accessed': 'Tues'}` | `['url']` | s3 | +| 1761899973000 | s6 | `{'url': 'http://xyz', 'accessed': 'Wed'}` | `['url']` | s5 | +| | s7 | `{'doc_id': 'D1', 'revision': 2}` | `['doc_id']` | s1 | +| 1761899974000 | s8 | `{'doc_id': 'D2', 'revision': 1}` | `['doc_id']` | | +| | s9 | `{'url': 'http://xyz', 'accessed': 'Mon'}` | `['url']` | s6 | + +#### Querying current documents + +At the end of these four rounds of extraction, the documents s7, s4, s8 and s9 are considered current: + +![Current](../../../content/images/versioning-2.png) + +#### Querying at a point in time + +If we were to query at timestamp 1761899972500, documents s1, s4 and s5 would be considered current: + +![Historical](../../../content/images/versioning-3.png) + +### Inspecting source metadata + +You can inspect the metadata and versioning information attached to source nodes using the `LexicalGraphIndex.get_sources()` method.
+ +#### Get details of all source nodes + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + sources = graph_index.get_sources() + + print(json.dumps(sources, indent=2)) +``` + +The results are formatted like this: + +```json +[ + { + "metadata": { + "file_path": "/home/myuser/docs/readme.md", + "creation_date": "2025-12-16T00:00:00.000Z", + "file_name": "readme.md", + "title": "How to play", + "file_size": 93, + "last_modified_date": "2025-12-16T00:00:00.000Z", + "file_type": "text/markdown", + "version": "v1" + }, + "versioning": { + "build_timestamp": 1765880067513, + "id_fields": [ + "file_name", + "title" + ], + "valid_from": 1761899971000, + "valid_to": 1761899972000, + "extract_timestamp": 1765880063557 + }, + "sourceId": "aws:tenant123:31141440:6de6" + }, + { + "metadata": { + "file_path": "/home/myuser/docs/readme.md", + "creation_date": "2025-12-16T00:00:00.000Z", + "file_name": "readme.md", + "title": "How to play", + "file_size": 91, + "last_modified_date": "2025-12-16T00:00:00.000Z", + "file_type": "text/markdown", + "version": "v2" + }, + "versioning": { + "build_timestamp": 1765880102994, + "id_fields": [ + "file_name", + "title" + ], + "valid_from": 1761899972000, + "valid_to": 1761899973000, + "extract_timestamp": 1765880098515 + }, + "sourceId": "aws:tenant123:34570f12:0726" + }, + { + "metadata": { + "file_path": "/home/myuser/docs/readme.md", + "creation_date": "2025-12-16T00:00:00.000Z", + "file_name": "readme.md", + "title": "How to play", + "file_size": 93, + "last_modified_date": "2025-12-16T00:00:00.000Z", + "file_type": "text/markdown", + "version": "v3" + }, + "versioning": { + "build_timestamp": 1765880173432, + "id_fields": [ + "file_name", + "title" + ], + "valid_from": 1761899973000, + "valid_to": 1761899974000, + "extract_timestamp": 1765880166001 + }, + "sourceId": "aws:tenant123:07ca52e6:8960" + }, + { + "metadata": { + "file_path": "/home/myuser/docs/readme.md", + "creation_date": "2025-12-16T00:00:00.000Z", + "file_name": "readme.md", + "title": "How to play", + "file_size": 83, + "last_modified_date": "2025-12-16T00:00:00.000Z", + "file_type": "text/markdown", + "version": "v4" + }, + "versioning": { + "build_timestamp": 1765880242134, + "id_fields": [ + "file_name", + "title" + ], + "valid_from": 1761899974000, + "valid_to": 10000000000000, + "extract_timestamp": 1765880236433 + }, + "sourceId": "aws:tenant123:7a54612d:57b8" + } +] +``` + +#### Get details of all current source nodes + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.versioning import VersioningConfig, VersioningMode + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + 
tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + versioning_config = VersioningConfig(versioning_mode=VersioningMode.CURRENT) + + sources = graph_index.get_sources(versioning_config=versioning_config) + + print(json.dumps(sources, indent=2)) +``` + +#### Get details of all previous source nodes + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.versioning import VersioningConfig, VersioningMode + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + versioning_config = VersioningConfig(versioning_mode=VersioningMode.PREVIOUS) + + sources = graph_index.get_sources(versioning_config=versioning_config) + + print(json.dumps(sources, indent=2)) +``` + +#### Get details of previous versions of files with specific metadata + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.versioning import VersioningConfig, VersioningMode + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + versioning_config = VersioningConfig(versioning_mode=VersioningMode.PREVIOUS) + + sources = graph_index.get_sources( + filter={ + 'file_name': 'readme.md', + 'title': 'How to play' + }, + versioning_config=versioning_config + ) + + print(json.dumps(sources, indent=2)) +``` + +### Deleting documents + +You can delete individual document subgraphs using the `LexicalGraphIndex.delete_sources()` method. + +> **WARNING** Deleting documents is a destructive action: document subgraphs will be physically removed from the graph store and their embeddings from the vector store. You can use the `LexicalGraphIndex.get_sources()` method to validate the sources that will be deleted before running `delete_sources()`. As an extra precaution, consider backing up your graph and vector stores prior to initiating a delete. Backup processes for the different graph and vector store backends are out-of-scope for the toolkit. + +`delete_sources()` has the same signature as `get_sources()`. You can use `get_sources()` to review which document versions will be deleted before running `delete_sources()`, as shown in the sketch below. + +When a versioned document is deleted, its source node, together with all its chunk, topic and statement nodes, is deleted from the lexical graph. The delete process will also remove any orphaned facts and entities that are no longer connected to at least one document.
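+ +The following sketch illustrates this review-then-delete pattern. The filter values are hypothetical; because `get_sources()` and `delete_sources()` share the same signature, the same arguments are passed to both calls: + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + # Review the sources that match the filter before deleting anything + candidates = graph_index.get_sources(filter={'file_name': 'readme.md'}) + print(json.dumps(candidates, indent=2)) + + # After confirming the results, delete using the same arguments + deleted = graph_index.delete_sources(filter={'file_name': 'readme.md'}) + print(json.dumps(deleted, indent=2)) +```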
+ +#### Deleting documents by source id + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + deleted = graph_index.delete_sources(source_ids=[ + 'aws:tenant123:31141440:6de6', + 'aws:tenant123:34570f12:0726' + ]) + + print(json.dumps(deleted, indent=2)) +``` + +#### Deleting all previous versions of a document + +The following example uses version-independent metadata fields (in this case `file_name` and `title`) to identify all versions (current and previous) of a specific document, and then a versioning config with `versioning_mode=VersioningMode.PREVIOUS` to further narrow the selection to those versions of the document that are no longer current. These previous versions of the document are then deleted: + +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.versioning import VersioningConfig, VersioningMode + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + versioning_config = VersioningConfig(versioning_mode=VersioningMode.PREVIOUS) + + deleted = graph_index.delete_sources( + filter={ + 'file_name': 'readme.md', + 'title': 'How to play' + }, + versioning_config=versioning_config + ) + + print(json.dumps(deleted, indent=2)) +``` + +#### Deleting a document with version-specific metadata + +The following example assumes that each version of the `readme.md` file titled 'How to play' has a unique `version` metadata value (`version` is a domain-specific piece of metadata supplied by the application at indexing time, not a part of the internal versioning metadata used by the versioned update feature). Here, we delete version `v2` of the versioned document.
+ +```python +import os +import json + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory + +with ( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + + graph_index = LexicalGraphIndex( + graph_store, + vector_store, + tenant_id='tenant123' # optional - uses default tenant if not specified + ) + + deleted = graph_index.delete_sources( + filter={ + 'file_name': 'readme.md', + 'title': 'How to play', + 'version': 'v2' + } + ) + + print(json.dumps(deleted, indent=2)) +``` + +### Automatically delete versioned documents + +You can configure the build process to automatically delete versioned documents using the `DeletePrevVersions` node handler: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex, GraphRAGConfig +from graphrag_toolkit.lexical_graph import add_versioning_info +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.indexing.build import DeletePrevVersions + +from llama_index.core import SimpleDirectoryReader +from llama_index.core.readers.file.base import default_file_metadata_func + +GraphRAGConfig.enable_versioning = True + +def get_file_metadata(file_path): + metadata = default_file_metadata_func(file_path) + return add_versioning_info(metadata, id_fields=['file_name', 'file_path']) + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + reader = SimpleDirectoryReader(input_dir='./my_docs/', file_metadata=get_file_metadata) + docs = reader.load_data() + + graph_index.extract_and_build(docs, handler=DeletePrevVersions(lexical_graph=graph_index)) +``` + +> **Warning** Use `DeletePrevVersions` with care. If your version-independent metadata fields are too permissive, you may end up versioning and deleting the wrong documents. + +#### Implementing deletion protection + +`DeletePrevVersions` accepts a custom filter function. This function will be invoked with the metadata of each versioned document that is a candidate for deletion. If the function returns `True`, the document will be deleted; if it returns `False`, the document will not be deleted. You can use this custom filter function and a custom metadata field to implement deletion protection. 
The following example adds a `deletionProtection` metadata field to each document to be indexed; the custom filter function then checks the value of this field: + +```python +import os + +from graphrag_toolkit.lexical_graph import LexicalGraphIndex, GraphRAGConfig +from graphrag_toolkit.lexical_graph import add_versioning_info +from graphrag_toolkit.lexical_graph.storage import GraphStoreFactory +from graphrag_toolkit.lexical_graph.storage import VectorStoreFactory +from graphrag_toolkit.lexical_graph.indexing.build import DeletePrevVersions + +from llama_index.core import SimpleDirectoryReader +from llama_index.core.readers.file.base import default_file_metadata_func + +GraphRAGConfig.enable_versioning = True + +def get_file_metadata(file_path): + metadata = default_file_metadata_func(file_path) + metadata['deletionProtection'] = True # custom metadata field + return add_versioning_info(metadata, id_fields=['file_name', 'file_path']) + +def deletion_protection_filter_fn(metadata): + deletion_protection = metadata.get('deletionProtection', False) + return not deletion_protection + +with( + GraphStoreFactory.for_graph_store(os.environ['GRAPH_STORE']) as graph_store, + VectorStoreFactory.for_vector_store(os.environ['VECTOR_STORE']) as vector_store +): + graph_index = LexicalGraphIndex( + graph_store, + vector_store + ) + + reader = SimpleDirectoryReader(input_dir='./my_docs/', file_metadata=get_file_metadata) + docs = reader.load_data() + + graph_index.extract_and_build( + docs, + handler=DeletePrevVersions( + lexical_graph=graph_index, + filter_fn=deletion_protection_filter_fn # do not delete docs with deletionProtection == True + ) + ) +``` + +### Upgrading existing graph and vector stores + +If you have existing graph and vector stores created by a version of the graphrag-toolkit prior to version 3.14.x, you will need to upgrade them before using the versioned updates feature. The graphrag-toolkit includes an `upgrade_for_versioning.py` script that will upgrade a graph and vector store so that you can use versioned updates. + +> Do not index any documents while the upgrade script is running. + +Download the [`upgrade_for_versioning.py`](https://github.com/awslabs/graphrag-toolkit/blob/main/examples/lexical-graph/scripts/upgrade_for_versioning.py) script to an environment that can access your graph and vector stores. Then run the script, supplying your graph store and vector store connection info: + +``` +python upgrade_for_versioning.py --graph-store <graph-store-connection-info> --vector_store <vector-store-connection-info> +``` + +#### Upgrading specific tenants + +By default, the script upgrades all [tenants](/graphrag-toolkit/lexical-graph/multi-tenancy/) in the graph and vector stores. You can restrict the list of tenants using the `--tenant-ids` parameter. For example: + +``` +python upgrade_for_versioning.py --graph-store <graph-store-connection-info> --vector_store <vector-store-connection-info> --tenant-ids t1 t2 _default +``` + +Note that `_default` identifies the default tenant. + +#### Upgrading specific vector indexes + +By default, the script only updates the chunk index for each tenant. Your vector store may also contain a statement index, which is used by the [semantic-guided search](/graphrag-toolkit/lexical-graph/semantic-guided-search/). Semantic-guided search is likely to be removed in future versions of the toolkit – to avoid unnecessary work, we therefore recommend _not_ upgrading this index.
+ +If, however, you do want to upgrade the statement index, supply the `--index-names` parameter: + +``` +python upgrade_for_versioning.py --graph-store <graph-store-connection-info> --vector_store <vector-store-connection-info> --index-names chunk statement +``` diff --git a/docs-site/src/content/images/extract-and-build.png b/docs-site/src/content/images/extract-and-build.png new file mode 100644 index 00000000..d1a2ff90 Binary files /dev/null and b/docs-site/src/content/images/extract-and-build.png differ diff --git a/docs-site/src/content/images/hybrid-extract-and-build.png b/docs-site/src/content/images/hybrid-extract-and-build.png new file mode 100644 index 00000000..da60cf3c Binary files /dev/null and b/docs-site/src/content/images/hybrid-extract-and-build.png differ diff --git a/docs-site/src/content/images/lexical-graph.png b/docs-site/src/content/images/lexical-graph.png new file mode 100644 index 00000000..f78e47af Binary files /dev/null and b/docs-site/src/content/images/lexical-graph.png differ diff --git a/docs-site/src/content/images/question-answering.png b/docs-site/src/content/images/question-answering.png new file mode 100644 index 00000000..e9e49d50 Binary files /dev/null and b/docs-site/src/content/images/question-answering.png differ diff --git a/docs-site/src/content/images/versionable-unit.png b/docs-site/src/content/images/versionable-unit.png new file mode 100644 index 00000000..eea2ae46 Binary files /dev/null and b/docs-site/src/content/images/versionable-unit.png differ diff --git a/docs-site/src/content/images/versioning-1.png b/docs-site/src/content/images/versioning-1.png new file mode 100644 index 00000000..cacedaec Binary files /dev/null and b/docs-site/src/content/images/versioning-1.png differ diff --git a/docs-site/src/content/images/versioning-2.png b/docs-site/src/content/images/versioning-2.png new file mode 100644 index 00000000..0143adeb Binary files /dev/null and b/docs-site/src/content/images/versioning-2.png differ diff --git a/docs-site/src/content/images/versioning-3.png b/docs-site/src/content/images/versioning-3.png new file mode 100644 index 00000000..a5fad46e Binary files /dev/null and b/docs-site/src/content/images/versioning-3.png differ diff --git a/docs-site/src/env.d.ts b/docs-site/src/env.d.ts new file mode 100644 index 00000000..9bc5cb41 --- /dev/null +++ b/docs-site/src/env.d.ts @@ -0,0 +1 @@ +/// \ No newline at end of file diff --git a/docs-site/src/styles/custom.css b/docs-site/src/styles/custom.css new file mode 100644 index 00000000..e0cce2be --- /dev/null +++ b/docs-site/src/styles/custom.css @@ -0,0 +1,174 @@ +/* Valkey-style theming for GraphRAG Toolkit docs.
+ Reference: https://glide.valkey.io */ + +:root { + /* Brand accent — graph-purple */ + --sl-color-accent-low: #1a0d2e; + --sl-color-accent: #7c3aed; + --sl-color-accent-high: #c4b5fd; + + /* Text */ + --sl-color-white: #ffffff; + --sl-color-gray-1: #e6e6f0; + --sl-color-gray-2: #b8b8cc; + --sl-color-gray-3: #8a8aa3; + --sl-color-gray-4: #4a4a66; + --sl-color-gray-5: #25253a; + --sl-color-gray-6: #14141f; + + /* Backgrounds */ + --sl-color-black: #0a0a14; + --sl-color-bg: #0a0a14; + --sl-color-bg-nav: #0f0f1e; + --sl-color-bg-sidebar: #0f0f1e; + --sl-color-bg-inline-code: #1c1c2e; + + --sl-font: 'Inter', system-ui, -apple-system, 'Segoe UI', sans-serif; + --sl-font-system-mono: 'JetBrains Mono', ui-monospace, 'SF Mono', Menlo, monospace; +} + +:root[data-theme='light'] { + --sl-color-accent-low: #ede9fe; + --sl-color-accent: #6d28d9; + --sl-color-accent-high: #4c1d95; + + --sl-color-white: #0a0a14; + --sl-color-gray-1: #14141f; + --sl-color-gray-2: #25253a; + --sl-color-gray-3: #4a4a66; + --sl-color-gray-4: #8a8aa3; + --sl-color-gray-5: #d9d9e6; + --sl-color-gray-6: #f1f0f7; + --sl-color-gray-7: #faf9ff; + + --sl-color-black: #ffffff; + --sl-color-bg: #ffffff; + --sl-color-bg-nav: #faf9ff; + --sl-color-bg-sidebar: #faf9ff; + --sl-color-bg-inline-code: #f1f0f7; +} + +:root[data-theme='light'] .hero::before { + background: + radial-gradient(ellipse 80% 60% at 50% 0%, rgba(124, 58, 237, 0.18), transparent 60%), + radial-gradient(ellipse 60% 40% at 80% 30%, rgba(59, 130, 246, 0.14), transparent 60%), + radial-gradient(ellipse 60% 40% at 20% 40%, rgba(236, 72, 153, 0.10), transparent 60%); +} + +:root[data-theme='light'] .hero h1 { + background: linear-gradient(180deg, #1a0d2e 0%, #6d28d9 100%); + -webkit-background-clip: text; + background-clip: text; +} + +:root[data-theme='light'] .hero .tagline { + color: var(--sl-color-gray-3); +} + +:root[data-theme='light'] header.header { + background: rgba(255, 255, 255, 0.75); +} + +:root[data-theme='light'] .card { + background: linear-gradient(180deg, rgba(124, 58, 237, 0.05) 0%, rgba(124, 58, 237, 0.01) 100%); + border-color: var(--sl-color-gray-5); +} + +:root[data-theme='light'] .hero a.action.minimal { + color: var(--sl-color-gray-1); + border-color: var(--sl-color-gray-5); + background: #ffffff; +} + +/* Splash hero — big centered, gradient backdrop, valkey-style */ +.hero { + padding-block: 4rem 3rem; + position: relative; + overflow: hidden; +} + +.hero::before { + content: ''; + position: absolute; + inset: 0; + background: + radial-gradient(ellipse 80% 60% at 50% 0%, rgba(124, 58, 237, 0.25), transparent 60%), + radial-gradient(ellipse 60% 40% at 80% 30%, rgba(59, 130, 246, 0.18), transparent 60%), + radial-gradient(ellipse 60% 40% at 20% 40%, rgba(236, 72, 153, 0.12), transparent 60%); + pointer-events: none; + z-index: -1; +} + +.hero h1 { + font-size: clamp(2.5rem, 6vw, 4.5rem); + font-weight: 800; + letter-spacing: -0.03em; + background: linear-gradient(180deg, #ffffff 0%, #c4b5fd 100%); + -webkit-background-clip: text; + background-clip: text; + color: transparent; + line-height: 1.05; +} + +.hero .tagline { + font-size: clamp(1.1rem, 1.8vw, 1.4rem); + color: var(--sl-color-gray-2); + max-width: 42rem; + margin-inline: auto; +} + +.hero .actions { + margin-top: 2rem; + gap: 0.75rem; +} + +.hero a.action.primary { + background: linear-gradient(135deg, #7c3aed 0%, #4f46e5 100%); + border: none; + box-shadow: 0 8px 24px -8px rgba(124, 58, 237, 0.6); + transition: transform 0.15s ease, box-shadow 0.15s ease; +} + +.hero 
a.action.primary:hover { + transform: translateY(-2px); + box-shadow: 0 12px 32px -8px rgba(124, 58, 237, 0.8); +} + +.hero a.action.minimal { + color: var(--sl-color-white); + border: 1px solid var(--sl-color-gray-4); + background: rgba(255, 255, 255, 0.03); +} + +/* Feature card grid below hero */ +.card-grid { + margin-block: 3rem; +} + +.card { + background: linear-gradient(180deg, rgba(124, 58, 237, 0.06) 0%, rgba(124, 58, 237, 0.02) 100%); + border: 1px solid var(--sl-color-gray-5); + border-radius: 12px; + transition: border-color 0.2s ease, transform 0.2s ease; +} + +.card:hover { + border-color: var(--sl-color-accent); + transform: translateY(-2px); +} + +.card .icon { + color: var(--sl-color-accent-high); +} + +/* Top nav polish */ +header.header { + border-bottom: 1px solid var(--sl-color-gray-5); + backdrop-filter: blur(12px); + background: rgba(10, 10, 20, 0.7); +} + +/* Code blocks */ +.expressive-code { + border-radius: 10px; +} diff --git a/docs-site/tsconfig.json b/docs-site/tsconfig.json new file mode 100644 index 00000000..bcbf8b50 --- /dev/null +++ b/docs-site/tsconfig.json @@ -0,0 +1,3 @@ +{ + "extends": "astro/tsconfigs/strict" +}