From db2387e90e42bbc09e70e44625bd9fc1cc6c67ad Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 11 Feb 2026 18:03:43 +0100 Subject: [PATCH 1/5] Rename .plx extension to .mthds and update "workflow" terminology to "method" MTHDS is the new name for the open standard. Pipelex remains the reference implementation. This is a hard switch with no backward-compatible .plx loading. - Add MTHDS_EXTENSION constant in helpers.py as single source of truth - Rename Plx* classes to Mthds* (PlxFactory -> MthdsFactory, PlxConfig -> MthdsConfig) - Rename all .plx files to .mthds (builder bundles, test fixtures) - Update functions/variables referencing "plx" as file format - Update TOML config sections (plx_config -> mthds_config) - Update all CLI help text, error messages, and examples - Update documentation and README ("workflow" -> "method" for MTHDS concept) - Update VS Code config file associations Co-Authored-By: Claude Opus 4.6 --- .vscode/launch.json | 6 +- .vscode/settings.json | 2 +- README.md | 30 +- docs/home/1-releases/chicago.md | 12 +- .../observer-provider-injection.md | 2 +- docs/home/2-get-started/pipe-builder.md | 24 +- .../2-get-started/write-workflows-manually.md | 22 +- .../language-spec-v0-1-0.md | 24 +- .../pipelex-paradigm/index.md | 4 +- docs/home/3-understand-pipelex/viewpoint.md | 66 ++--- docs/home/4-cookbook-examples/extract-dpe.md | 2 +- .../home/4-cookbook-examples/extract-gantt.md | 6 +- .../4-cookbook-examples/extract-generic.md | 2 +- .../extract-proof-of-purchase.md | 2 +- .../home/4-cookbook-examples/extract-table.md | 4 +- docs/home/4-cookbook-examples/hello-world.md | 2 +- docs/home/4-cookbook-examples/index.md | 4 +- .../4-cookbook-examples/invoice-extractor.md | 8 +- docs/home/4-cookbook-examples/simple-ocr.md | 2 +- docs/home/4-cookbook-examples/write-tweet.md | 6 +- docs/home/5-setup/configure-ai-providers.md | 4 +- docs/home/5-setup/index.md | 2 +- docs/home/5-setup/project-organization.md | 14 +- .../concepts/define_your_concepts.md | 6 +- .../concepts/inline-structures.md | 10 +- .../concepts/native-concepts.md | 12 +- .../concepts/python-classes.md | 10 +- .../concepts/refining-concepts.md | 6 +- .../6-build-reliable-ai-workflows/domain.md | 16 +- .../kick-off-a-pipelex-workflow-project.md | 30 +- .../libraries.md | 18 +- .../pipe-builder.md | 22 +- .../pipelex-bundle-specification.md | 14 +- .../pipes/executing-pipelines.md | 28 +- .../pipes/index.md | 22 +- .../pipes/pipe-controllers/PipeBatch.md | 4 +- .../pipes/pipe-controllers/PipeCondition.md | 4 +- .../pipes/pipe-controllers/PipeParallel.md | 4 +- .../pipes/pipe-controllers/PipeSequence.md | 6 +- .../pipes/pipe-controllers/index.md | 4 +- .../pipes/pipe-operators/PipeExtract.md | 4 +- .../pipes/pipe-operators/PipeFunc.md | 8 +- .../pipes/pipe-operators/PipeImgGen.md | 4 +- .../pipes/pipe-operators/PipeLLM.md | 4 +- .../pipes/pipe-operators/index.md | 2 +- .../pipes/pipe-output.md | 4 +- .../pipes/provide-inputs.md | 4 +- .../pipes/understanding-multiplicity.md | 6 +- .../inference-backend-config.md | 10 +- .../config-technical/library-config.md | 62 ++--- docs/home/9-tools/cli/build/inputs.md | 8 +- docs/home/9-tools/cli/build/output.md | 10 +- docs/home/9-tools/cli/build/pipe.md | 22 +- docs/home/9-tools/cli/build/runner.md | 12 +- docs/home/9-tools/cli/build/structures.md | 6 +- docs/home/9-tools/cli/index.md | 4 +- docs/home/9-tools/cli/run.md | 10 +- docs/home/9-tools/cli/show.md | 2 +- docs/home/9-tools/cli/validate.md | 28 +- docs/home/9-tools/pipe-builder.md | 6 +- docs/index.md | 16 +- 
docs/under-the-hood/architecture-overview.md | 10 +- docs/under-the-hood/index.md | 2 +- docs/under-the-hood/reasoning-controls.md | 4 +- pipelex/builder/CLAUDE.md | 10 +- ...ntic_builder.plx => agentic_builder.mthds} | 0 .../builder/{builder.plx => builder.mthds} | 0 pipelex/builder/builder_loop.py | 14 +- ...{concept_fixer.plx => concept_fixer.mthds} | 0 pipelex/builder/conventions.py | 4 +- .../{pipe_design.plx => pipe_design.mthds} | 0 ...esize_image.plx => synthesize_image.mthds} | 0 pipelex/cli/_cli.py | 4 +- pipelex/cli/agent_cli/CLAUDE.md | 10 +- pipelex/cli/agent_cli/_agent_cli.py | 24 +- .../cli/agent_cli/commands/agent_output.py | 6 +- .../cli/agent_cli/commands/assemble_cmd.py | 8 +- pipelex/cli/agent_cli/commands/build_cmd.py | 2 +- pipelex/cli/agent_cli/commands/build_core.py | 23 +- pipelex/cli/agent_cli/commands/graph_cmd.py | 16 +- pipelex/cli/agent_cli/commands/inputs_cmd.py | 10 +- pipelex/cli/agent_cli/commands/run_cmd.py | 8 +- .../cli/agent_cli/commands/validate_cmd.py | 6 +- pipelex/cli/commands/build/app.py | 2 +- pipelex/cli/commands/build/inputs_cmd.py | 14 +- pipelex/cli/commands/build/output_cmd.py | 14 +- pipelex/cli/commands/build/pipe_cmd.py | 33 +-- pipelex/cli/commands/build/runner_cmd.py | 18 +- pipelex/cli/commands/build/structures_cmd.py | 24 +- pipelex/cli/commands/run_cmd.py | 38 +-- pipelex/cli/commands/show_cmd.py | 2 +- pipelex/cli/commands/validate_cmd.py | 14 +- pipelex/cli/error_handlers.py | 2 +- pipelex/core/interpreter/helpers.py | 8 +- pipelex/hub.py | 2 +- pipelex/language/mthds_config.py | 28 ++ .../{plx_factory.py => mthds_factory.py} | 32 +-- pipelex/language/plx_config.py | 28 -- pipelex/libraries/library.py | 4 +- pipelex/libraries/library_manager.py | 98 +++---- pipelex/libraries/library_manager_abstract.py | 4 +- pipelex/libraries/library_utils.py | 50 ++-- .../compose/construct_blueprint.py | 4 +- pipelex/pipelex.toml | 12 +- pipelex/pipeline/pipeline_run_setup.py | 16 +- pipelex/pipeline/validate_bundle.py | 40 +-- pipelex/system/configuration/configs.py | 4 +- ...ted_concepts.plx => nested_concepts.mthds} | 0 .../test_structure_generator_cli.py | 12 +- .../{cv_batch.plx => cv_batch.mthds} | 0 .../{joke_batch.plx => joke_batch.mthds} | 0 .../pipe_batch/test_pipe_batch_graph.py | 2 +- ...ewsletter.plx => discord_newsletter.mthds} | 0 .../{test_tweet.plx => test_tweet.mthds} | 0 .../{cv_job_match.plx => cv_job_match.mthds} | 0 .../{pipe_img_gen.plx => pipe_img_gen.mthds} | 0 ...uts.plx => pipe_llm_document_inputs.mthds} | 0 ..._html.plx => pipe_llm_filename_html.mthds} | 0 ...inputs.plx => pipe_llm_image_inputs.mthds} | 0 ...e_llm_vision.plx => pipe_llm_vision.mthds} | 0 ...on.py => test_builder_mthds_validation.py} | 48 ++-- .../{base_domain.plx => base_domain.mthds} | 0 ...{middle_domain.plx => middle_domain.mthds} | 0 ...refines.plx => out_of_order_refines.mthds} | 0 .../test_out_of_order_refines.py | 22 +- ...ncept.plx => refines_custom_concept.mthds} | 0 .../pipelex/language/test_mthds_factory.py | 15 + .../pipelex/language/test_plx_factory.py | 15 - .../test_concept_to_concept_references.py | 78 +++--- .../pipeline/test_load_concepts_only.py | 84 +++--- ...former.plx => uppercase_transformer.mthds} | 0 ...condition_1.plx => pipe_condition_1.mthds} | 0 ...condition_2.plx => pipe_condition_2.mthds} | 0 ...mplex.plx => pipe_condition_complex.mthds} | 0 ...pipe_condition_continue_output_type.mthds} | 0 ...dition.plx => text_length_condition.mthds} | 0 ...lysis.plx => parallel_text_analysis.mthds} | 0 ...e_parallel_1.plx => 
pipe_parallel_1.mthds} | 0 ...italize_text.plx => capitalize_text.mthds} | 0 ...ewsletter.plx => discord_newsletter.mthds} | 0 ...e_sequence_1.plx => pipe_sequence_1.mthds} | 0 ...e_sequence_2.plx => pipe_sequence_2.mthds} | 0 ...e_sequence_3.plx => pipe_sequence_3.mthds} | 0 .../test_pipe_sequence_list_output_bug.py | 36 +-- ...ls.plx => compose_structured_models.mthds} | 0 .../test_pipe_func_validation_errors.py | 86 +++--- ..._basic.plx => test_structures_basic.mthds} | 0 ...plex.plx => test_structures_complex.mthds} | 0 ...ation.plx => crazy_image_generation.mthds} | 0 ..._pipelines.plx => failing_pipelines.mthds} | 0 .../pipelines/{flows.plx => flows.mthds} | 0 ...plx => multiple_images_input_to_llm.mthds} | 0 .../{multiplicity.plx => multiplicity.mthds} | 0 ...ed_concepts.plx => refined_concepts.mthds} | 0 ...age_inputs.plx => test_image_inputs.mthds} | 0 ...age_out_in.plx => test_image_out_in.mthds} | 0 .../pipelines/{tests.plx => tests.mthds} | 0 .../unit/pipelex/cli/test_agent_graph_cmd.py | 58 ++-- .../core/interpreter/test_interpreter.py | 18 +- .../core/test_data/errors/invalid_plx.py | 10 +- .../core/test_data/interpreter_test_cases.py | 4 +- ...t_plx_factory.py => test_mthds_factory.py} | 258 +++++++++--------- .../pipelex/tools/{test.plx => test.mthds} | 0 163 files changed, 1051 insertions(+), 1045 deletions(-) rename pipelex/builder/{agentic_builder.plx => agentic_builder.mthds} (100%) rename pipelex/builder/{builder.plx => builder.mthds} (100%) rename pipelex/builder/concept/{concept_fixer.plx => concept_fixer.mthds} (100%) rename pipelex/builder/pipe/{pipe_design.plx => pipe_design.mthds} (100%) rename pipelex/builder/synthetic_inputs/{synthesize_image.plx => synthesize_image.mthds} (100%) create mode 100644 pipelex/language/mthds_config.py rename pipelex/language/{plx_factory.py => mthds_factory.py} (95%) delete mode 100644 pipelex/language/plx_config.py rename tests/e2e/pipelex/concepts/nested_concepts/{nested_concepts.plx => nested_concepts.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/{cv_batch.plx => cv_batch.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/{joke_batch.plx => joke_batch.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/{discord_newsletter.plx => discord_newsletter.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/{test_tweet.plx => test_tweet.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/{cv_job_match.plx => cv_job_match.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/{pipe_img_gen.plx => pipe_img_gen.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/{pipe_llm_document_inputs.plx => pipe_llm_document_inputs.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/{pipe_llm_filename_html.plx => pipe_llm_filename_html.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/{pipe_llm_image_inputs.plx => pipe_llm_image_inputs.mthds} (100%) rename tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/{pipe_llm_vision.plx => pipe_llm_vision.mthds} (100%) rename tests/integration/pipelex/builder/{test_builder_plx_validation.py => test_builder_mthds_validation.py} (78%) rename tests/integration/pipelex/concepts/out_of_order_refines/multi_file/{base_domain.plx => base_domain.mthds} (100%) rename tests/integration/pipelex/concepts/out_of_order_refines/multi_file/{middle_domain.plx => middle_domain.mthds} (100%) rename 
tests/integration/pipelex/concepts/out_of_order_refines/{out_of_order_refines.plx => out_of_order_refines.mthds} (100%) rename tests/integration/pipelex/concepts/refines_custom_concept/{refines_custom_concept.plx => refines_custom_concept.mthds} (100%) create mode 100644 tests/integration/pipelex/language/test_mthds_factory.py delete mode 100644 tests/integration/pipelex/language/test_plx_factory.py rename tests/integration/pipelex/pipes/controller/pipe_batch/{uppercase_transformer.plx => uppercase_transformer.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_condition/{pipe_condition_1.plx => pipe_condition_1.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_condition/{pipe_condition_2.plx => pipe_condition_2.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_condition/{pipe_condition_complex.plx => pipe_condition_complex.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_condition/{pipe_condition_continue_output_type.plx => pipe_condition_continue_output_type.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_condition/{text_length_condition.plx => text_length_condition.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_parallel/{parallel_text_analysis.plx => parallel_text_analysis.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_parallel/{pipe_parallel_1.plx => pipe_parallel_1.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_sequence/{capitalize_text.plx => capitalize_text.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_sequence/{discord_newsletter.plx => discord_newsletter.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_sequence/{pipe_sequence_1.plx => pipe_sequence_1.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_sequence/{pipe_sequence_2.plx => pipe_sequence_2.mthds} (100%) rename tests/integration/pipelex/pipes/controller/pipe_sequence/{pipe_sequence_3.plx => pipe_sequence_3.mthds} (100%) rename tests/integration/pipelex/pipes/operator/pipe_compose_structured/{compose_structured_models.plx => compose_structured_models.mthds} (100%) rename tests/integration/pipelex/pipes/operator/pipe_llm/{test_structures_basic.plx => test_structures_basic.mthds} (100%) rename tests/integration/pipelex/pipes/operator/pipe_llm/{test_structures_complex.plx => test_structures_complex.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{crazy_image_generation.plx => crazy_image_generation.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{failing_pipelines.plx => failing_pipelines.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{flows.plx => flows.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{multiple_images_input_to_llm.plx => multiple_images_input_to_llm.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{multiplicity.plx => multiplicity.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{refined_concepts.plx => refined_concepts.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{test_image_inputs.plx => test_image_inputs.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{test_image_out_in.plx => test_image_out_in.mthds} (100%) rename tests/integration/pipelex/pipes/pipelines/{tests.plx => tests.mthds} (100%) rename tests/unit/pipelex/language/{test_plx_factory.py => test_mthds_factory.py} (65%) rename tests/unit/pipelex/tools/{test.plx => test.mthds} (100%) diff --git 
a/.vscode/launch.json b/.vscode/launch.json index 77c0b76f6..4bae7cc96 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -75,7 +75,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "validate", - "temp/bundle.plx", + "temp/bundle.mthds", ], "console": "integratedTerminal", "justMyCode": false @@ -99,7 +99,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "run", - "tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx", + "tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds", ], "console": "integratedTerminal", "justMyCode": false @@ -111,7 +111,7 @@ "program": "${workspaceFolder}/.venv/bin/pipelex", "args": [ "run", - "tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx", + "tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds", "--pipe", "describe_image", "--inputs", diff --git a/.vscode/settings.json b/.vscode/settings.json index c37976523..f83a8323d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -22,7 +22,7 @@ "python.testing.pytestEnabled": true, "djlint.showInstallError": false, "files.associations": { - "*.plx": "plx" + "*.mthds": "mthds" }, "editor.formatOnSave": true, "[html]": { diff --git a/README.md b/README.md index 1c4eb35a7..e80faecb5 100644 --- a/README.md +++ b/README.md @@ -11,8 +11,8 @@

-

AI Workflows That Agents Build & Run

-

Pipelex is developing the open standard for repeatable AI workflows.
+

AI Methods That Agents Build & Run

+

Pipelex is developing the open standard for repeatable AI methods.
Write business logic, not API calls.

@@ -76,17 +76,17 @@ Use your existing API keys from OpenAI, Anthropic, Google, Mistral, etc. See [Co Run models locally with Ollama, vLLM, LM Studio, or llama.cpp - no API keys required. See [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) for details. -## 3. Generate Your First Workflow +## 3. Generate Your First Method -Create a complete AI workflow with a single command: +Create a complete AI method with a single command: ```bash -pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" --output results/cv_match.plx +pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" --output results/cv_match.mthds ``` -This command generates a production-ready `.plx` file with domain definitions, concepts, and multiple processing steps that analyzes CV-job fit and prepares interview questions. +This command generates a production-ready `.mthds` file with domain definitions, concepts, and multiple processing steps that analyzes CV-job fit and prepares interview questions. -**cv_match.plx** +**cv_match.mthds** ```toml domain = "cv_match" description = "Matching CVs with job offers and generating interview questions" @@ -109,7 +109,7 @@ refines = "Text" [pipe.analyze_cv_job_match_and_generate_questions] type = "PipeSequence" description = """ -Main pipeline that orchestrates the complete CV-job matching and interview question generation workflow. Takes a candidate's CV and a job offer as PDF documents, extracts their content, performs a comprehensive match analysis identifying strengths, gaps, and areas to probe, and generates exactly 5 targeted interview questions based on the analysis results. +Main pipeline that orchestrates the complete CV-job matching and interview question generation method. Takes a candidate's CV and a job offer as PDF documents, extracts their content, performs a comprehensive match analysis identifying strengths, gaps, and areas to probe, and generates exactly 5 targeted interview questions based on the analysis results. """ inputs = { cv_pdf = "PDF", job_offer_pdf = "PDF" } output = "Question[5]" @@ -255,7 +255,7 @@ flowchart TD ```bash # Run with input file -pipelex run results/cv_match.plx --inputs inputs.json +pipelex run results/cv_match.mthds --inputs inputs.json ``` Create an `inputs.json` file with your PDF URLs: @@ -305,13 +305,13 @@ asyncio.run(run_pipeline())
-

From Whiteboard to AI Workflow in less than 5 minutes with no hands (2025-07)

+

From Whiteboard to AI Method in less than 5 minutes with no hands (2025-07)

Pipelex Demo
-

The AI workflow that writes an AI workflow in 64 seconds (2025-09)

+

The AI method that writes an AI method in 64 seconds (2025-09)

Pipelex Live Demo @@ -323,21 +323,21 @@ asyncio.run(run_pipeline()) ## 💡 What is Pipelex? -Pipelex is an open-source language that enables you to build and run **repeatable AI workflows**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. +Pipelex is an open-source language that enables you to build and run **repeatable AI methods**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. -Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.plx` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, giving you the reliability of software with the intelligence of AI. +Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.mthds` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, giving you the reliability of software with the intelligence of AI. ## 📖 Next Steps **Learn More:** - [Design and Run Pipelines](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples -- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project/) - Deep dive into Pipelex +- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-method-project/) - Deep dive into Pipelex - [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) - Set up AI providers and models ## 🔧 IDE Extension -We **highly** recommend installing our extension for `.plx` files into your IDE. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex). It's coming soon to VS Code marketplace too. If you're using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. +We **highly** recommend installing our extension for `.mthds` files into your IDE. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex). It's coming soon to VS Code marketplace too. If you're using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. ## 📚 Examples & Cookbook diff --git a/docs/home/1-releases/chicago.md b/docs/home/1-releases/chicago.md index 82e11df81..f9a8128d6 100644 --- a/docs/home/1-releases/chicago.md +++ b/docs/home/1-releases/chicago.md @@ -4,7 +4,7 @@ title: "Chicago Release" # Pipelex v0.18.0 "Chicago" -**The AI workflow framework that just works.** +**The AI method framework that just works.** ## Why Pipelex @@ -12,19 +12,19 @@ Pipelex eliminates the complexity of building AI-powered applications. Instead o - **One framework** for prompts, pipelines, and structured outputs - **One API key** for dozens of AI models -- **One workflow** from prototype to production +- **One method** from prototype to production --- ## A Major Milestone -Three months after our first public launch in San Francisco, Pipelex reaches a new level of maturity with the "Chicago" release (currently in beta-test). This version delivers on our core promise: **enabling every developer to build AI workflows that are reliable, flexible, and production-ready**. 
+Three months after our first public launch in San Francisco, Pipelex reaches a new level of maturity with the "Chicago" release (currently in beta-test). This version delivers on our core promise: **enabling every developer to build AI methods that are reliable, flexible, and production-ready**. Version 0.18.0 represents our most significant release to date, addressing the three priorities that emerged from real-world usage: - **Universal model access** — one API key for all leading AI models - **State-of-the-art document extraction** — deployable anywhere -- **Visual pipeline inspection** — full transparency into your workflows +- **Visual pipeline inspection** — full transparency into your methods --- @@ -91,7 +91,7 @@ Broad support for open-source AI: ### Developer Experience -- **Pure PLX Workflows** — Inline concept structures now support nested concepts, making Pipelex fully usable with just `.plx` files and the CLI—no Python code required +- **Pure MTHDS Methods** — Inline concept structures now support nested concepts, making Pipelex fully usable with just `.mthds` files and the CLI—no Python code required - **Deep Integration Options** — Generate Pydantic BaseModels from your declarative concepts for full IDE autocomplete, type checking, and validation (TypeScript Zod structures coming soon) - **PipeCompose Construct Mode** — Build `StructuredContent` objects deterministically without an LLM, composing outputs from working memory variables, fixed values, templates, and nested structures - **Cloud Storage for Artifacts** — Store generated images and extracted pages on AWS S3 or Google Cloud Storage with public or signed URLs @@ -112,7 +112,7 @@ Then run `pipelex init` to configure your environment and obtain your Gateway AP --- -*Ready to build AI workflows that just work?* +*Ready to build AI methods that just work?* [Join the Waitlist](https://go.pipelex.com/waitlist){ .md-button .md-button--primary } [Documentation](https://docs.pipelex.com/pre-release){ .md-button } diff --git a/docs/home/10-advanced-customizations/observer-provider-injection.md b/docs/home/10-advanced-customizations/observer-provider-injection.md index f277ef067..eaeb4b21b 100644 --- a/docs/home/10-advanced-customizations/observer-provider-injection.md +++ b/docs/home/10-advanced-customizations/observer-provider-injection.md @@ -216,4 +216,4 @@ def setup_pipelex(): return pipelex_instance ``` -The observer system provides powerful insights into your pipeline execution patterns and is essential for monitoring, debugging, and optimizing your Pipelex workflows. \ No newline at end of file +The observer system provides powerful insights into your pipeline execution patterns and is essential for monitoring, debugging, and optimizing your Pipelex methods. \ No newline at end of file diff --git a/docs/home/2-get-started/pipe-builder.md b/docs/home/2-get-started/pipe-builder.md index 48e81d3b8..7bb7f90f7 100644 --- a/docs/home/2-get-started/pipe-builder.md +++ b/docs/home/2-get-started/pipe-builder.md @@ -1,5 +1,5 @@ --- -title: "Generate Workflows with Pipe Builder" +title: "Generate Methods with Pipe Builder" --- ![Pipelex Banner](https://d2cinlfp2qnig1.cloudfront.net/banners/pipelex_banner_docs_v2.png) @@ -18,9 +18,9 @@ During the second step of the initialization, we recommand, for a quick start, t If you want to bring your own API keys, see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. 
-# Generate workflows with Pipe Builder +# Generate methods with Pipe Builder -The fastest way to create production-ready AI workflows is with the Pipe Builder. Just describe what you want, and Pipelex generates complete, validated pipelines. +The fastest way to create production-ready AI methods is with the Pipe Builder. Just describe what you want, and Pipelex generates complete, validated pipelines. ```bash pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and generate 5 questions for the interview" @@ -28,12 +28,12 @@ pipelex build pipe "Take a CV and Job offer in PDF, analyze if they match and ge The pipe builder generates three files in a numbered directory (e.g., `results/pipeline_01/`): -1. **`bundle.plx`** - Complete production-ready script in our Pipelex language with domain definition, concepts, and pipe steps +1. **`bundle.mthds`** - Complete production-ready script in our Pipelex language with domain definition, concepts, and pipe steps 2. **`inputs.json`** - Template describing the **mandatory** inputs for running the pipe 3. **`run_{pipe_code}.py`** - Ready-to-run Python script that you can customize and execute !!! tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the workflows yourself, following our [documentation guide](./write-workflows-manually.md). + For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the methods yourself, following our [documentation guide](./write-workflows-manually.md). !!! info "Learn More" Want to understand how the Pipe Builder works under the hood? See [Pipe Builder Deep Dive](../9-tools/pipe-builder.md) for the full explanation of its multi-step generation process. @@ -43,14 +43,14 @@ The pipe builder generates three files in a numbered directory (e.g., `results/p **Option 1: CLI** ```bash -pipelex run results/cv_match.plx --inputs inputs.json +pipelex run results/cv_match.mthds --inputs inputs.json ``` The `--inputs` file should be a JSON dictionary where keys are input variable names and values are the input data. Learn more on how to provide the inputs of a pipe: [Providing Inputs to Pipelines](../../home/6-build-reliable-ai-workflows/pipes/provide-inputs.md) **Option 2: Python** -This requires having the `.plx` file or your pipe inside the directory where the Python file is located. +This requires having the `.mthds` file or your pipe inside the directory where the Python file is located. ```python import json @@ -76,7 +76,7 @@ print(pipe_output.main_stuff) ## IDE Support -We **highly** recommend installing our own extension for PLX files into your IDE of choice. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex) and download it directly using [this link](https://open-vsx.org/api/Pipelex/pipelex/0.2.1/file/Pipelex.pipelex-0.2.1.vsix). It's coming soon to the VS Code marketplace too and if you are using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. 
+We **highly** recommend installing our own extension for MTHDS files into your IDE of choice. You can find it in the [Open VSX Registry](https://open-vsx.org/extension/Pipelex/pipelex) and download it directly using [this link](https://open-vsx.org/api/Pipelex/pipelex/0.2.1/file/Pipelex.pipelex-0.2.1.vsix). It's coming soon to the VS Code marketplace too and if you are using Cursor, Windsurf or another VS Code fork, you can search for it directly in your extensions tab. ## Examples @@ -86,12 +86,12 @@ We **highly** recommend installing our own extension for PLX files into your IDE ## Next Steps -Now that you know how to generate workflows with the Pipe Builder, explore these resources: +Now that you know how to generate methods with the Pipe Builder, explore these resources: -**Learn how to Write Workflows yourself** +**Learn how to Write Methods yourself** -- [:material-pencil: Write Workflows Manually](./write-workflows-manually.md){ .md-button .md-button--primary } -- [:material-book-open-variant: Build Reliable AI Workflows](../6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md){ .md-button .md-button--primary } +- [:material-pencil: Write Methods Manually](./write-workflows-manually.md){ .md-button .md-button--primary } +- [:material-book-open-variant: Build Reliable AI Methods](../6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md){ .md-button .md-button--primary } **Explore Examples:** diff --git a/docs/home/2-get-started/write-workflows-manually.md b/docs/home/2-get-started/write-workflows-manually.md index 478983b92..d4c083e90 100644 --- a/docs/home/2-get-started/write-workflows-manually.md +++ b/docs/home/2-get-started/write-workflows-manually.md @@ -1,16 +1,16 @@ -# Writing Workflows +# Writing Methods -Ready to dive deeper? This section shows you how to manually create pipelines and understand the `.plx` language. +Ready to dive deeper? This section shows you how to manually create pipelines and understand the `.mthds` language. -!!! tip "Prefer Automated Workflow Generation?" - If you have access to **Claude 4.5 Sonnet** (via Pipelex Inference, Anthropic, Amazon Bedrock, or BlackBox AI), you can use our **pipe builder** to generate workflows from natural language descriptions. See the [Pipe Builder guide](./pipe-builder.md) to learn how to use `pipelex build pipe` commands. This tutorial is for those who want to write workflows manually or understand the `.plx` language in depth. +!!! tip "Prefer Automated Method Generation?" + If you have access to **Claude 4.5 Sonnet** (via Pipelex Inference, Anthropic, Amazon Bedrock, or BlackBox AI), you can use our **pipe builder** to generate methods from natural language descriptions. See the [Pipe Builder guide](./pipe-builder.md) to learn how to use `pipelex build pipe` commands. This tutorial is for those who want to write methods manually or understand the `.mthds` language in depth. ## Write Your First Pipeline Let's build a **character generator** to understand the basics. -Create a `.plx` file anywhere in your project (we recommend a `pipelines` directory): +Create a `.mthds` file anywhere in your project (we recommend a `pipelines` directory): -`character.plx` +`character.mthds` ```toml domain = "characters" # domain of existance of your pipe @@ -70,9 +70,9 @@ As you might notice, this is plain text, and nothing is structured. Now we are g Let's create a rigorously structured `Character` object instead of plain text. We need to create the concept `Character`. 
The concept names MUST be in PascalCase. [Learn more about defining concepts](../6-build-reliable-ai-workflows/concepts/define_your_concepts.md) -### Option 1: Define the Structure in your `.plx` file +### Option 1: Define the Structure in your `.mthds` file -Define structures directly in your `.plx` file: +Define structures directly in your `.mthds` file: ```toml [concept.Character] # Declare the concept by giving it a name. @@ -89,7 +89,7 @@ description = "A description of the character" # Fourth attribute: "descrip Specify that the output of your Pipellm is a `Character` object: -`characters.plx` +`characters.mthds` ```toml domain = "characters" @@ -146,7 +146,7 @@ Learn more in [Inline Structures](../6-build-reliable-ai-workflows/concepts/inli Specify that the output of your Pipellm is a `Character` object: -`characters.plx` +`characters.mthds` ```toml domain = "characters" @@ -330,7 +330,7 @@ Now that you understand the basics, explore more: **Learn more about Pipelex (domains, project structure, best practices...)** -- [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) - Deep dive into pipeline design +- [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) - Deep dive into pipeline design - [Cookbook Examples](../../home/4-cookbook-examples/index.md) - Real-world examples and patterns **Learn More about the other pipes** diff --git a/docs/home/3-understand-pipelex/language-spec-v0-1-0.md b/docs/home/3-understand-pipelex/language-spec-v0-1-0.md index 7f6b319aa..f26e523ba 100644 --- a/docs/home/3-understand-pipelex/language-spec-v0-1-0.md +++ b/docs/home/3-understand-pipelex/language-spec-v0-1-0.md @@ -1,28 +1,28 @@ -# Pipelex (PLX) – Declarative AI Workflow Spec (v0.1.0) +# Pipelex (MTHDS) – Declarative AI Method Spec (v0.1.0) -**Build deterministic, repeatable AI workflows using declarative TOML syntax.** +**Build deterministic, repeatable AI methods using declarative TOML syntax.** -The Pipelex Language (PLX) uses a TOML-based syntax to define deterministic, repeatable AI workflows. This specification documents version 0.1.0 of the language and establishes the canonical way to declare domains, concepts, and pipes inside `.plx` bundles. +The Pipelex Language (MTHDS) uses a TOML-based syntax to define deterministic, repeatable AI methods. This specification documents version 0.1.0 of the language and establishes the canonical way to declare domains, concepts, and pipes inside `.mthds` bundles. --- ## Core Idea -Pipelex is a workflow declaration language that gets interpreted at runtime, we already have a Python runtime (see [github.com/pipelex/pipelex](https://github.com/pipelex/pipelex)). +Pipelex is a method declaration language that gets interpreted at runtime, we already have a Python runtime (see [github.com/pipelex/pipelex](https://github.com/pipelex/pipelex)). -Pipelex lets you declare **what** your AI workflow should accomplish and **how** to execute it step by step. Each `.plx` file represents a bundle where you define: +Pipelex lets you declare **what** your AI method should accomplish and **how** to execute it step by step. 
Each `.mthds` file represents a bundle where you define: - **Concepts** (PascalCase): the structured or unstructured data flowing through your system -- **Pipes** (snake_case): operations or orchestrators that define your workflow +- **Pipes** (snake_case): operations or orchestrators that define your method - **Domain** (named in snake_case): the topic or field of work this bundle is about -Write once in `.plx` files. Run anywhere. Get the same results every time. +Write once in `.mthds` files. Run anywhere. Get the same results every time. --- ## Semantics -Pipelex workflows are **declarative and deterministic**: +Pipelex methods are **declarative and deterministic**: - Pipes are evaluated based on their dependencies, not declaration order - Controllers explicitly define execution flow (sequential, parallel, or conditional) @@ -35,7 +35,7 @@ All concepts are strongly typed. All pipes declare their inputs and outputs. The **Guarantees:** -- Deterministic workflow execution and outputs +- Deterministic method execution and outputs - Strong typing with validation before runtime **Not supported in v0.1.0:** @@ -48,9 +48,9 @@ All concepts are strongly typed. All pipes declare their inputs and outputs. The --- -## Complete Example: CV Job Matching Workflow +## Complete Example: CV Job Matching Method -This workflow analyses candidate CVs against job offer requirements to determine match quality. +This method analyses candidate CVs against job offer requirements to determine match quality. ```toml domain = "cv_job_matching" @@ -180,5 +180,5 @@ Evaluate how well this candidate matches the job requirements. - Processes all candidate CVs in parallel (batch processing) - Each CV is extracted and analyzed against the structured job requirements using an LLM - Produces a scored match analysis for each candidate with strengths, weaknesses, and hiring recommendations -- Demonstrates sequential orchestration, parallel processing, nested workflows, and strong typing +- Demonstrates sequential orchestration, parallel processing, nested methods, and strong typing diff --git a/docs/home/3-understand-pipelex/pipelex-paradigm/index.md b/docs/home/3-understand-pipelex/pipelex-paradigm/index.md index 80ca7b913..0754ec490 100644 --- a/docs/home/3-understand-pipelex/pipelex-paradigm/index.md +++ b/docs/home/3-understand-pipelex/pipelex-paradigm/index.md @@ -1,12 +1,12 @@ # The Pipelex Paradigm -Pipelex is an **open-source Python framework** for defining and running **repeatable AI workflows**. +Pipelex is an **open-source Python framework** for defining and running **repeatable AI methods**. Here's what we've learned: LLMs are powerful, but asking them to do everything in one prompt is like asking a brilliant colleague to solve ten problems while juggling. The more complexity you pack into a single prompt, the more reliability drops. You've seen it: the perfect prompt that works 90% of the time until it doesn't. The solution is straightforward: break complex tasks into focused steps. But without proper tooling, you end up with spaghetti code and prompts scattered across your codebase. -Pipelex introduces **knowledge pipelines**: a way to capture these workflow steps as **composable pipes**. Each pipe follows one rule: **knowledge in, knowledge out**. Unlike rigid templates, each pipe uses AI's full intelligence to handle variation while guaranteeing consistent output structure. You get **deterministic structure with adaptive intelligence**, the reliability of software with the flexibility of AI. 
+Pipelex introduces **knowledge pipelines**: a way to capture these method steps as **composable pipes**. Each pipe follows one rule: **knowledge in, knowledge out**. Unlike rigid templates, each pipe uses AI's full intelligence to handle variation while guaranteeing consistent output structure. You get **deterministic structure with adaptive intelligence**, the reliability of software with the flexibility of AI. ## Working with Knowledge and Using Concepts to Make Sense diff --git a/docs/home/3-understand-pipelex/viewpoint.md b/docs/home/3-understand-pipelex/viewpoint.md index 1690ef0fe..78aed111d 100644 --- a/docs/home/3-understand-pipelex/viewpoint.md +++ b/docs/home/3-understand-pipelex/viewpoint.md @@ -5,13 +5,13 @@ Web version: https://knowhowgraph.com/ --- # Viewpoint: The Know-How Graph -Declarative, Repeatable AI Workflows as Shared Infrastructure +Declarative, Repeatable AI Methods as Shared Infrastructure **TL;DR** Agents are great at solving new problems, terrible at doing the same thing twice. -We argue that repeatable AI workflows should complement agents: written in a declarative language that both humans and agents can understand, reuse, and compose. These workflows become tools that agents can build, invoke, and share to turn repeatable cognitive work into reliable infrastructure. +We argue that repeatable AI methods should complement agents: written in a declarative language that both humans and agents can understand, reuse, and compose. These methods become tools that agents can build, invoke, and share to turn repeatable cognitive work into reliable infrastructure. At scale, this forms a **Know-How Graph:** a network of reusable methods that become shared infrastructure. @@ -25,13 +25,13 @@ This is **the repeatability paradox**. Agents excel at understanding requirement ### We Need a Standard for Reusable Methods -The solution is to capture these methods as AI workflows so agents can reuse them. +The solution is to capture this know-how as AI methods so agents can reuse it. -By "AI workflows" we mean the actual intellectual work that wasn't automatable before LLMs: extracting structured data from unstructured documents, applying complex analyses and business rules, generating reports with reasoning. **This isn’t about API plumbing or app connectors, it’s about the actual intellectual work.** +By "AI methods" we mean the actual intellectual work that wasn't automatable before LLMs: extracting structured data from unstructured documents, applying complex analyses and business rules, generating reports with reasoning. **This isn’t about API plumbing or app connectors, it’s about the actual intellectual work.** -Yet look at what's happening today: teams everywhere are hand-crafting the same workflows from scratch. To extract data points from contracts and RFPs, to process expense reports, to classify documents, to screen resumes: identical problems solved in isolation, burning engineering hours. +Yet look at what's happening today: teams everywhere are hand-crafting the same methods from scratch. To extract data points from contracts and RFPs, to process expense reports, to classify documents, to screen resumes: identical problems solved in isolation, burning engineering hours. -## AI workflows must be formalized +## AI methods must be formalized OpenAPI and MCP enable interoperability for software and agents.
The remaining problem is formalizing the **methods that assemble the cognitive steps themselves:** extraction, analysis, synthesis, creativity, and decision-making, the part where understanding matters. These formalized methods must be: @@ -39,29 +39,29 @@ OpenAPI and MCP enable interoperability for software and agents. The remaining p - **Efficient:** use the right AI model for each step, large or small. - **Transparent:** no black boxes. Domain experts can audit the logic, spot issues, suggest improvements. -The workflow becomes a shared artifact that humans and AI collaborate on, optimize together, and trust to run at scale. +The method becomes a shared artifact that humans and AI collaborate on, optimize together, and trust to run at scale. ### Current solutions are inadequate -Engineers building AI workflows today are stuck with bad options. +Engineers building AI methods today are stuck with bad options. -Code frameworks like LangChain require **maintaining custom software for every workflow,** with business logic buried in implementation details and technical debt accumulating with each new use case. +Code frameworks like LangChain require **maintaining custom software for every method,** with business logic buried in implementation details and technical debt accumulating with each new use case. -Visual builders like Zapier, Make, or n8n excel at what they're designed for: connecting APIs and automating data flow between services. **But automation platforms are not cognitive workflow systems.** AI was bolted on as a feature after the fact. They weren't built for intellectual work. When you need actual understanding and multi-step reasoning, these tools quickly become unwieldy. +Visual builders like Zapier, Make, or n8n excel at what they're designed for: connecting APIs and automating data flow between services. **But automation platforms are not cognitive method systems.** AI was bolted on as a feature after the fact. They weren't built for intellectual work. When you need actual understanding and multi-step reasoning, these tools quickly become unwieldy. -None of these solutions speak the language of the domain expert. None of them were built for agents to understand, modify, or generate workflows from requirements. They express technical plumbing, not business logic. +None of these solutions speak the language of the domain expert. None of them were built for agents to understand, modify, or generate methods from requirements. They express technical plumbing, not business logic. At the opposite, agent SDKs and multi-agent frameworks give you flexibility but sacrifice the repeatability you need for production. **You want agents for exploration and problem-solving, but when you've found a solution that works, you need to lock it down.** -> We need a universal workflow language that expresses business logic, not technical plumbing. -This workflow language must run across platforms, models, and agent frameworks, where the method outlives any vendor or model version. +> We need a universal method language that expresses business logic, not technical plumbing. +This method language must run across platforms, models, and agent frameworks, where the method outlives any vendor or model version. > ## We Need a Declarative Language -AI workflows should be first-class citizens of our technical infrastructure: not buried in code or trapped in platforms, but expressed in a language built for the job. The method should be an artifact you can version, diff, test, and optimize. 
+AI methods should be first-class citizens of our technical infrastructure: not buried in code or trapped in platforms, but expressed in a language built for the job. The method should be an artifact you can version, diff, test, and optimize. -**We need a declarative language that states what you want, not how to compute it.** As SQL separated intent from implementation for data, we need the same for AI workflows — so we can build a Know-How Graph: a reusable graph of methods that agents and humans both understand. +**We need a declarative language that states what you want, not how to compute it.** As SQL separated intent from implementation for data, we need the same for AI methods — so we can build a Know-How Graph: a reusable graph of methods that agents and humans both understand. ### The language shouldn’t need documentation: it is the documentation @@ -71,22 +71,22 @@ Traditional programs are instructions a machine blindly executes. The machine do ### Language fosters collaboration: users and agents building together -The language must be readable by everyone who matters: domain experts who know the business logic, engineers who optimize and deploy it, and crucially, AI agents that can build and refine workflows autonomously. +The language must be readable by everyone who matters: domain experts who know the business logic, engineers who optimize and deploy it, and crucially, AI agents that can build and refine methods autonomously. -Imagine agents that transform natural language requirements into working workflows. They design each transformation step (or reuse existing ones), test against real or synthetic data, incorporate expert feedback, and iterate to improve quality while reducing costs. Once a workflow is built, agents can invoke it as a reliable tool whenever they need structured, predictable outputs. +Imagine agents that transform natural language requirements into working methods. They design each transformation step (or reuse existing ones), test against real or synthetic data, incorporate expert feedback, and iterate to improve quality while reducing costs. Once a method is built, agents can invoke it as a reliable tool whenever they need structured, predictable outputs. -> This is how agents finally remember know-how: by encoding methods into reusable workflows they can build, share, and execute on demand. +> This is how agents finally remember know-how: by encoding methods into reusable methods they can build, share, and execute on demand. > ## The Know-How Graph: a Network of Composable Methods -**Breaking complex work into smaller tasks is a recursive, core pattern.** Each workflow should stand on the shoulders of others, composing like LEGO bricks to build increasingly sophisticated cognitive systems. +**Breaking complex work into smaller tasks is a recursive, core pattern.** Each method should stand on the shoulders of others, composing like LEGO bricks to build increasingly sophisticated cognitive systems. What emerges is a **Know-How Graph**: not just static knowledge, but executable methods that connect and build upon one another. **Unlike a knowledge graph mapping facts, this maps procedures: the actual know-how of getting cognitive work done.** **Example:** -A recruitment workflow doesn't start from scratch. It composes existing workflows: +A recruitment method doesn't start from scratch. It composes existing methods: - ExtractCandidateProfile (experience, education, skills…) - ExtractJobOffer (skills, years of experience…). 
@@ -95,23 +95,23 @@ These feed into your custom ScoreCard logic to produce a MatchAnalysis, which tr Each component can be assigned to different team members and validated independently by the relevant stakeholders. -> Think of a workflow as a proven route through the work, and the Know-How Graph as the network of all such routes. +> Think of a method as a proven route through the work, and the Know-How Graph as the network of all such routes. > ### Know-how is as shareable as knowledge -Think about the explosion of prompt sharing since 2023. All those people trading their best ChatGPT prompts on Twitter, GitHub, Reddit, LinkedIn. Now imagine that same viral knowledge sharing, but with complete, tested, composable workflows instead of fragile prompts. +Think about the explosion of prompt sharing since 2023. All those people trading their best ChatGPT prompts on Twitter, GitHub, Reddit, LinkedIn. Now imagine that same viral knowledge sharing, but with complete, tested, composable methods instead of fragile prompts. -We’ve seen this movie: software package managers, SQL views, Docker, dbt packages. Composable standards create ecosystems where everyone’s work makes everyone else more productive. Generic workflows for common tasks will spread rapidly, while companies keep their differentiating workflows as competitive advantage. That's how we stop reinventing the wheel while preserving secret sauce. +We’ve seen this movie: software package managers, SQL views, Docker, dbt packages. Composable standards create ecosystems where everyone’s work makes everyone else more productive. Generic methods for common tasks will spread rapidly, while companies keep their differentiating methods as competitive advantage. That's how we stop reinventing the wheel while preserving secret sauce. -The same principle applies to AI workflows through the Know-How Graph: durable infrastructure that compounds value over time. +The same principle applies to AI methods through the Know-How Graph: durable infrastructure that compounds value over time. -> The Know-How Graph will thrive on the open web because workflows are just files: easy to publish, fork, improve, and compose. +> The Know-How Graph will thrive on the open web because methods are just files: easy to publish, fork, improve, and compose. > ### What this unlocks -- Faster time to production (reuse existing workflows + AI writes them for you) +- Faster time to production (reuse existing methods + AI writes them for you) - Lower run costs (optimize price / performance for each task) - Better collaboration between tech and business - Better auditability / compliance @@ -121,26 +121,26 @@ The same principle applies to AI workflows through the Know-How Graph: durable i [**Pipelex**](https://github.com/Pipelex/pipelex) is our take on this language: open-source (MIT), designed for the Know-How Graph. -Each workflow is built from pipes: modular transformations that guarantee their output structure while applying intelligence to the content. A pipe is a knowledge transformer with a simple contract: knowledge in → knowledge out., each defined conceptually and with explicit structure and validation. The method is readable and editable by humans and agents. +Each method is built from pipes: modular transformations that guarantee their output structure while applying intelligence to the content. A pipe is a knowledge transformer with a simple contract: knowledge in → knowledge out., each defined conceptually and with explicit structure and validation. 
The method is readable and editable by humans and agents. -Our Pipelex workflow builder is itself a Pipelex workflow. The tooling builds itself. +Our Pipelex method builder is itself a Pipelex method. The tooling builds itself. ## Why This Can Become a Standard -Pipelex is MIT-licensed and designed for portability. Workflows are files, based on TOML syntax (itself well standardized), and the outputs are validated JSON. +Pipelex is MIT-licensed and designed for portability. Methods are files, based on TOML syntax (itself well standardized), and the outputs are validated JSON. -Early adopters are contributing to the [cookbook repo](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago), building integrations, and running workflows in production. The pieces for ecosystem growth are in place: declarative spec, reference implementation, composable architecture. +Early adopters are contributing to the [cookbook repo](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago), building integrations, and running methods in production. The pieces for ecosystem growth are in place: declarative spec, reference implementation, composable architecture. Building a standard is hard. We're at v0.1.0, with versioning and backward compatibility coming next. The spec will evolve with your feedback. ## Join Us -The most valuable standards are boring infrastructure everyone relies on: SQL, HTTP, JSON. Pipelex aims to be that for AI workflows. +The most valuable standards are boring infrastructure everyone relies on: SQL, HTTP, JSON. Pipelex aims to be that for AI methods. -Start with one workflow: extract invoice data, process applications, analyze reports… Share what works. Build on what others share. +Start with one method: extract invoice data, process applications, analyze reports… Share what works. Build on what others share. -**The future of AI needs both:** smarter agents that explore and adapt, AND reliable workflows that execute proven methods at scale. One workflow at a time, let's build the cognitive infrastructure every organization needs. +**The future of AI needs both:** smarter agents that explore and adapt, AND reliable methods that execute proven methods at scale. One method at a time, let's build the cognitive infrastructure every organization needs. --- diff --git a/docs/home/4-cookbook-examples/extract-dpe.md b/docs/home/4-cookbook-examples/extract-dpe.md index 7df181d3e..edc91c142 100644 --- a/docs/home/4-cookbook-examples/extract-dpe.md +++ b/docs/home/4-cookbook-examples/extract-dpe.md @@ -52,7 +52,7 @@ class Dpe(StructuredContent): yearly_energy_costs: Optional[float] = None ``` -## The Pipeline Definition: `extract_dpe.plx` +## The Pipeline Definition: `extract_dpe.mthds` The pipeline uses a `PipeLLM` with a very specific prompt to extract the information from the document. The combination of the image and the OCR text allows the LLM to accurately capture all the details. diff --git a/docs/home/4-cookbook-examples/extract-gantt.md b/docs/home/4-cookbook-examples/extract-gantt.md index 156e8eeee..7ea9043f6 100644 --- a/docs/home/4-cookbook-examples/extract-gantt.md +++ b/docs/home/4-cookbook-examples/extract-gantt.md @@ -51,9 +51,9 @@ class GanttChart(StructuredContent): milestones: Optional[List[Milestone]] ``` -## The Pipeline Definition: `gantt.plx` +## The Pipeline Definition: `gantt.mthds` -The `extract_gantt_by_steps` pipeline is a sequence of smaller, focused pipes. This is a great example of building a complex workflow from simple, reusable components. 
+The `extract_gantt_by_steps` pipeline is a sequence of smaller, focused pipes. This is a great example of building a complex method from simple, reusable components. ```toml [pipe.extract_gantt_by_steps] @@ -92,7 +92,7 @@ Here is the name of the task you have to extract the dates for: @gantt_task_name """ ``` -This demonstrates the "divide and conquer" approach that Pipelex encourages. By breaking down a complex problem into smaller steps, each step can be handled by a specialized pipe, making the overall workflow more robust and easier to debug. +This demonstrates the "divide and conquer" approach that Pipelex encourages. By breaking down a complex problem into smaller steps, each step can be handled by a specialized pipe, making the overall method more robust and easier to debug. ## Flowchart diff --git a/docs/home/4-cookbook-examples/extract-generic.md b/docs/home/4-cookbook-examples/extract-generic.md index e0cf87b1e..519beacca 100644 --- a/docs/home/4-cookbook-examples/extract-generic.md +++ b/docs/home/4-cookbook-examples/extract-generic.md @@ -24,7 +24,7 @@ async def extract_generic(pdf_url: str) -> TextAndImagesContent: return markdown_and_images ``` -The `merge_markdown_and_images` function is a great example of how you can add your own Python code to a Pipelex workflow to perform custom processing. +The `merge_markdown_and_images` function is a great example of how you can add your own Python code to a Pipelex method to perform custom processing. ```python def merge_markdown_and_images(working_memory: WorkingMemory) -> TextAndImagesContent: diff --git a/docs/home/4-cookbook-examples/extract-proof-of-purchase.md b/docs/home/4-cookbook-examples/extract-proof-of-purchase.md index 4faed4ad7..48736f345 100644 --- a/docs/home/4-cookbook-examples/extract-proof-of-purchase.md +++ b/docs/home/4-cookbook-examples/extract-proof-of-purchase.md @@ -48,7 +48,7 @@ class ProofOfPurchase(StructuredContent): ``` This demonstrates how you can create nested data structures to accurately model your data. -## The Pipeline Definition: `extract_proof_of_purchase.plx` +## The Pipeline Definition: `extract_proof_of_purchase.mthds` The pipeline uses a powerful `PipeLLM` to extract the structured data from the document. The prompt is carefully engineered to guide the LLM. diff --git a/docs/home/4-cookbook-examples/extract-table.md b/docs/home/4-cookbook-examples/extract-table.md index 2f963daec..97e9a57a1 100644 --- a/docs/home/4-cookbook-examples/extract-table.md +++ b/docs/home/4-cookbook-examples/extract-table.md @@ -56,7 +56,7 @@ class HtmlTable(StructuredContent): return self ``` -## The Pipeline Definition: `table.plx` +## The Pipeline Definition: `table.mthds` The pipeline uses a two-step "extract and review" pattern. The first pipe does the initial extraction, and the second pipe reviews the generated HTML against the original image to correct any errors. This is a powerful pattern for increasing the reliability of LLM outputs. @@ -88,4 +88,4 @@ Rewrite the entire html table with your potential corrections. Make sure you do not forget any text. """ ``` -This self-correction pattern is a key technique for building robust and reliable AI workflows with Pipelex. \ No newline at end of file +This self-correction pattern is a key technique for building robust and reliable AI methods with Pipelex. 
\ No newline at end of file diff --git a/docs/home/4-cookbook-examples/hello-world.md b/docs/home/4-cookbook-examples/hello-world.md index b81e1c4aa..536521f59 100644 --- a/docs/home/4-cookbook-examples/hello-world.md +++ b/docs/home/4-cookbook-examples/hello-world.md @@ -44,7 +44,7 @@ asyncio.run(hello_world()) This example shows the minimal setup needed to run a Pipelex pipeline: initialize Pipelex, execute a pipeline by its code name, and pretty-print the results. -## The Pipeline Definition: `hello_world.plx` +## The Pipeline Definition: `hello_world.mthds` The pipeline definition is extremely simple - it's a single LLM call that generates a haiku: diff --git a/docs/home/4-cookbook-examples/index.md b/docs/home/4-cookbook-examples/index.md index b17436d70..79704d4d8 100644 --- a/docs/home/4-cookbook-examples/index.md +++ b/docs/home/4-cookbook-examples/index.md @@ -5,7 +5,7 @@ Welcome to the Pipelex Cookbook! [![GitHub](https://img.shields.io/badge/Cookbook-5a0dad?logo=github&logoColor=white&style=flat)](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -This is your go-to resource for practical examples and ready-to-use recipes to build powerful and reliable AI workflows with Pipelex. Whether you're a beginner looking to get started or an experienced user searching for advanced patterns, you'll find something useful here. +This is your go-to resource for practical examples and ready-to-use recipes to build powerful and reliable AI methods with Pipelex. Whether you're a beginner looking to get started or an experienced user searching for advanced patterns, you'll find something useful here. ## Philosophy @@ -34,7 +34,7 @@ Here are some of the examples you can find in the cookbook, organized by categor * [**Simple OCR**](./simple-ocr.md): A basic OCR pipeline to extract text from a PDF. * [**Generic Document Extraction**](./extract-generic.md): A powerful pipeline to extract text and images from complex documents. -* [**Invoice Extractor**](./invoice-extractor.md): A complete workflow for processing invoices, including reporting. +* [**Invoice Extractor**](./invoice-extractor.md): A complete method for processing invoices, including reporting. * [**Proof of Purchase Extraction**](./extract-proof-of-purchase.md): A targeted pipeline for extracting data from receipts. ### Graphical Extraction diff --git a/docs/home/4-cookbook-examples/invoice-extractor.md b/docs/home/4-cookbook-examples/invoice-extractor.md index 8dc82644c..186266061 100644 --- a/docs/home/4-cookbook-examples/invoice-extractor.md +++ b/docs/home/4-cookbook-examples/invoice-extractor.md @@ -9,7 +9,7 @@ This example provides a comprehensive pipeline for processing invoices. It takes ## The Pipeline Explained -The `process_invoice` pipeline is a complete workflow for invoice processing. +The `process_invoice` pipeline is a complete method for invoice processing. ```python async def process_invoice(pdf_url: str) -> ListContent[Invoice]: @@ -51,9 +51,9 @@ class Invoice(StructuredContent): # ... other fields ``` -## The Pipeline Definition: `invoice.plx` +## The Pipeline Definition: `invoice.mthds` -The entire workflow is defined in a PLX file. This declarative approach makes the pipeline easy to understand and modify. Here's a snippet from `invoice.plx`: +The entire method is defined in a MTHDS file. This declarative approach makes the pipeline easy to understand and modify. 
Here's a snippet from `invoice.mthds`: ```toml [pipe.process_invoice] @@ -89,7 +89,7 @@ The category of this invoice is: $invoice_details.category. """ ``` -This shows how a complex workflow, including text extraction with `PipeExtract` and LLM calls, can be defined in a simple, readable format. The `model = "$engineering-structured"` line is particularly powerful, as it tells the LLM to structure its output according to the `Invoice` model. +This shows how a complex method, including text extraction with `PipeExtract` and LLM calls, can be defined in a simple, readable format. The `model = "$engineering-structured"` line is particularly powerful, as it tells the LLM to structure its output according to the `Invoice` model. ## The Pipeline Flowchart diff --git a/docs/home/4-cookbook-examples/simple-ocr.md b/docs/home/4-cookbook-examples/simple-ocr.md index bccfa51cd..58f4633a7 100644 --- a/docs/home/4-cookbook-examples/simple-ocr.md +++ b/docs/home/4-cookbook-examples/simple-ocr.md @@ -2,7 +2,7 @@ This example demonstrates a basic OCR (Optical Character Recognition) pipeline. It takes a PDF file as input, extracts the text from each page, and saves the content. -This is a fundamental building block for many document processing workflows. +This is a fundamental building block for many document processing methods. ## Get the code diff --git a/docs/home/4-cookbook-examples/write-tweet.md b/docs/home/4-cookbook-examples/write-tweet.md index a3454a708..1825cd2c5 100644 --- a/docs/home/4-cookbook-examples/write-tweet.md +++ b/docs/home/4-cookbook-examples/write-tweet.md @@ -36,7 +36,7 @@ This example shows how to use multiple inputs to guide the generation process an ## The Data Structure: `OptimizedTweet` Model -The data model for this pipeline is very simple, as the final output is just a piece of text. However, the pipeline uses several concepts internally to manage the workflow, such as `DraftTweet`, `TweetAnalysis`, and `WritingStyle`. +The data model for this pipeline is very simple, as the final output is just a piece of text. However, the pipeline uses several concepts internally to manage the method, such as `DraftTweet`, `TweetAnalysis`, and `WritingStyle`. ```python class OptimizedTweet(TextContent): @@ -44,7 +44,7 @@ class OptimizedTweet(TextContent): pass ``` -## The Pipeline Definition: `tech_tweet.plx` +## The Pipeline Definition: `tech_tweet.mthds` This pipeline uses a two-step "analyze and optimize" sequence. The first pipe analyzes the draft tweet for common pitfalls, and the second pipe rewrites the tweet based on the analysis and a provided writing style. This is a powerful pattern for refining generated content. @@ -82,7 +82,7 @@ Evaluate the tweet for these key issues: @draft_tweet """ ``` -This "analyze and refine" pattern is a great way to build more reliable and sophisticated text generation workflows. The first step provides a structured critique, and the second step uses that critique to improve the final output. +This "analyze and refine" pattern is a great way to build more reliable and sophisticated text generation methods. The first step provides a structured critique, and the second step uses that critique to improve the final output. 
Here is the flowchart generated during this run: diff --git a/docs/home/5-setup/configure-ai-providers.md b/docs/home/5-setup/configure-ai-providers.md index 881648662..cb894e52f 100644 --- a/docs/home/5-setup/configure-ai-providers.md +++ b/docs/home/5-setup/configure-ai-providers.md @@ -173,10 +173,10 @@ Learn more in our [Inference Backend Configuration](../../home/7-configuration/c Now that you have your backend configured: 1. **Organize your project**: [Project Organization](./project-organization.md) -2. **Learn the concepts**: [Writing Workflows Tutorial](../../home/2-get-started/pipe-builder.md) +2. **Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md) 3. **Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -4. **Deep dive**: [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) +4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) !!! tip "Advanced Configuration" For detailed backend configuration options, see [Inference Backend Configuration](../../home/7-configuration/config-technical/inference-backend-config.md). diff --git a/docs/home/5-setup/index.md b/docs/home/5-setup/index.md index 61a3cc0b7..2051dd126 100644 --- a/docs/home/5-setup/index.md +++ b/docs/home/5-setup/index.md @@ -12,7 +12,7 @@ If you already have a project running and want to tune behavior, jump to [Config ## Quick guide - **Need to run pipelines with LLMs?** Start with [Configure AI Providers](./configure-ai-providers.md). -- **Need a recommended repo layout for `.plx` and Python code?** See [Project Organization](./project-organization.md). +- **Need a recommended repo layout for `.mthds` and Python code?** See [Project Organization](./project-organization.md). - **Need to understand telemetry and privacy trade-offs?** See [Telemetry](./telemetry.md). - **Ready to tune the knobs?** Go to [Configuration Overview](../7-configuration/index.md). diff --git a/docs/home/5-setup/project-organization.md b/docs/home/5-setup/project-organization.md index d62e3bd72..da08468d6 100644 --- a/docs/home/5-setup/project-organization.md +++ b/docs/home/5-setup/project-organization.md @@ -2,7 +2,7 @@ ## Overview -Pipelex automatically discovers `.plx` pipeline files anywhere in your project (excluding `.venv`, `.git`, `node_modules`, etc.). +Pipelex automatically discovers `.mthds` pipeline files anywhere in your project (excluding `.venv`, `.git`, `node_modules`, etc.). ## Recommended: Keep pipelines with related code @@ -11,11 +11,11 @@ your_project/ ├── my_project/ # Your Python package │ ├── finance/ │ │ ├── services.py -│ │ ├── invoices.plx # Pipeline with finance code +│ │ ├── invoices.mthds # Pipeline with finance code │ │ └── invoices_struct.py # Structure classes │ └── legal/ │ ├── services.py -│ ├── contracts.plx # Pipeline with legal code +│ ├── contracts.mthds # Pipeline with legal code │ └── contracts_struct.py ├── .pipelex/ # Config at repo root │ └── pipelex.toml @@ -28,8 +28,8 @@ your_project/ ```bash your_project/ ├── pipelines/ -│ ├── invoices.plx -│ ├── contracts.plx +│ ├── invoices.mthds +│ ├── contracts.mthds │ └── structures.py └── .pipelex/ └── pipelex.toml @@ -51,8 +51,8 @@ Learn more in our [Project Structure documentation](../../home/6-build-reliable- Now that you understand project organization: 1. 
**Start building**: [Get Started](../../home/2-get-started/pipe-builder.md) -2. **Learn the concepts**: [Writing Workflows Tutorial](../../home/2-get-started/pipe-builder.md) +2. **Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md) 3. **Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -4. **Deep dive**: [Build Reliable AI Workflows](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) +4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md index f2f4270c9..c1fc9447f 100644 --- a/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md +++ b/docs/home/6-build-reliable-ai-workflows/concepts/define_your_concepts.md @@ -1,6 +1,6 @@ # Defining Your Concepts -Concepts are the foundation of reliable AI workflows. They define what flows through your pipes—not just as data types, but as meaningful pieces of knowledge with clear boundaries and validation rules. +Concepts are the foundation of reliable AI methods. They define what flows through your pipes—not just as data types, but as meaningful pieces of knowledge with clear boundaries and validation rules. ## Writing Concept Definitions @@ -72,7 +72,7 @@ Those concepts will be Text-based by default. If you want to use structured outp Group concepts that naturally belong together in the same domain. A domain acts as a namespace for a set of related concepts and pipes, helping you organize and reuse your pipeline components. You can learn more about them in [Understanding Domains](../domain.md). ```toml -# finance.plx +# finance.mthds domain = "finance" description = "Financial document processing" @@ -86,7 +86,7 @@ LineItem = "An individual item or service listed in a financial document" ## Get Started with Inline Structures -To add structure to your concepts, the **recommended approach** is using **inline structures** directly in your `.plx` files. Inline structures support all field types including nested concepts: +To add structure to your concepts, the **recommended approach** is using **inline structures** directly in your `.mthds` files. Inline structures support all field types including nested concepts: ```toml [concept.Customer] diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md b/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md index 06f0025a1..7d82053f8 100644 --- a/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md +++ b/docs/home/6-build-reliable-ai-workflows/concepts/inline-structures.md @@ -1,6 +1,6 @@ # Inline Structure Definition -Define structured concepts directly in your `.plx` files using pipelex syntax. This is the **recommended approach** for most use cases, offering rapid development without Python boilerplate. +Define structured concepts directly in your `.mthds` files using pipelex syntax. This is the **recommended approach** for most use cases, offering rapid development without Python boilerplate. For an introduction to concepts themselves, see [Define Your Concepts](define_your_concepts.md). For advanced features requiring Python classes, see [Python StructuredContent Classes](python-classes.md). 
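As a quick taste of the syntax covered on that page, individual fields are declared as inline TOML tables. The lines below are a hedged sketch for illustration only: the field names and values are made up, and they reuse the documented `type` / `description` / `required` / `default_value` keys; see the full examples in the inline-structures and Python-classes guides for where such lines sit inside a concept definition.

```toml
# Hypothetical field declarations reusing the documented inline-table form
age = { type = "integer", description = "User's age", required = false }
in_stock = { type = "boolean", description = "Stock availability", default_value = true }
```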
@@ -246,11 +246,11 @@ The `pipelex build structures` command generates Python classes from your inline ### Usage ```bash -# Generate from a directory of .plx files +# Generate from a directory of .mthds files pipelex build structures ./my_pipelines/ -# Generate from a specific .plx file -pipelex build structures ./my_pipeline/bundle.plx +# Generate from a specific .mthds file +pipelex build structures ./my_pipeline/bundle.mthds # Specify output directory pipelex build structures ./my_pipelines/ -o ./generated/ @@ -306,5 +306,5 @@ See [Python StructuredContent Classes](python-classes.md) for advanced features. - [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics and naming - [Python StructuredContent Classes](python-classes.md) - Advanced features with Python -- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs +- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md index 98515c181..9ba73cf3b 100644 --- a/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md +++ b/docs/home/6-build-reliable-ai-workflows/concepts/native-concepts.md @@ -1,12 +1,12 @@ # Native Concepts -Pipelex includes several built-in native concepts that cover common data types in AI workflows. These concepts come with predefined structures and are automatically available in all pipelines—no setup required. +Pipelex includes several built-in native concepts that cover common data types in AI methods. These concepts come with predefined structures and are automatically available in all pipelines—no setup required. For an introduction to concepts, see [Define Your Concepts](define_your_concepts.md). ## What Are Native Concepts? -Native concepts are ready-to-use building blocks for AI workflows. They represent common data types you'll frequently work with: text, images, documents, numbers, and combinations thereof. +Native concepts are ready-to-use building blocks for AI methods. They represent common data types you'll frequently work with: text, images, documents, numbers, and combinations thereof. **Key characteristics:** @@ -133,7 +133,7 @@ class DynamicContent(StuffContent): pass ``` -**Use for:** Workflows where the content structure isn't known in advance. +**Use for:** Methods where the content structure isn't known in advance. ### JSONContent @@ -189,7 +189,7 @@ output = "Page" This extracts each page with both its text/images and a visual representation. 
-### In Complex Workflows +### In Complex Methods ```toml [pipe.create_report] @@ -223,7 +223,7 @@ Refine native concepts when: - ✅ You need semantic specificity (e.g., `Invoice` vs `Document`) - ✅ You want to add custom structure on top of the base structure -- ✅ Building domain-specific workflows +- ✅ Building domain-specific methods - ✅ Need type safety for specific document types ## Common Patterns @@ -286,5 +286,5 @@ Analyze this image: $image" - [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics - [Inline Structures](inline-structures.md) - Add structure to refined concepts - [Python StructuredContent Classes](python-classes.md) - Advanced customization -- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Use native concepts in pipelines +- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Use native concepts in pipelines diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md b/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md index c2d46a837..dc19439c7 100644 --- a/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md +++ b/docs/home/6-build-reliable-ai-workflows/concepts/python-classes.md @@ -122,7 +122,7 @@ age = { type = "integer", description = "User's age", required = false } **Step 2: Generate the base class** ```bash -pipelex build structures ./my_pipeline.plx -o ./structures/ +pipelex build structures ./my_pipeline.mthds -o ./structures/ ``` **Step 3: Add custom validation** @@ -151,7 +151,7 @@ class UserProfile(StructuredContent): return v ``` -**Step 4: Update your .plx file** +**Step 4: Update your .mthds file** ```toml [concept] @@ -184,7 +184,7 @@ in_stock = { type = "boolean", description = "Stock availability", default_value **2. Generate the Python class:** ```bash -pipelex build structures ./ecommerce.plx -o ./structures/ +pipelex build structures ./ecommerce.mthds -o ./structures/ ``` **3. Add your custom logic** to the generated file: @@ -217,7 +217,7 @@ class Product(StructuredContent): return f"${self.price:.2f}" ``` -**4. Update your `.plx` file:** +**4. Update your `.mthds` file:** ```toml domain = "ecommerce" @@ -255,5 +255,5 @@ Product = "A product in the catalog" - [Inline Structures](inline-structures.md) - Fast prototyping with TOML - [Define Your Concepts](define_your_concepts.md) - Learn about concept semantics and naming -- [Writing Workflows Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs +- [Writing Methods Tutorial](../../2-get-started/pipe-builder.md) - Get started with structured outputs diff --git a/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md b/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md index 6412e8d1c..a35097158 100644 --- a/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md +++ b/docs/home/6-build-reliable-ai-workflows/concepts/refining-concepts.md @@ -1,6 +1,6 @@ # Refining Concepts -Concept refinement allows you to create more specific versions of existing concepts while inheriting their structure. This provides semantic clarity and type safety for domain-specific workflows. +Concept refinement allows you to create more specific versions of existing concepts while inheriting their structure. This provides semantic clarity and type safety for domain-specific methods. ## What is Concept Refinement? 
@@ -37,7 +37,7 @@ inputs = { contract = "Contract" } # Clear what type of document is expected output = "ContractTerms" ``` -### 3. Domain-Specific Workflows +### 3. Domain-Specific Methods Build pipelines tailored to specific use cases: @@ -287,7 +287,7 @@ refines = "Document" - ✅ Your concept is semantically a specific type of an existing concept - ✅ The base concept's structure is sufficient for your needs - ✅ You want to inherit existing validation and behavior -- ✅ You're building domain-specific workflows with clear document/content types +- ✅ You're building domain-specific methods with clear document/content types - ✅ You need to create specialized versions of an existing concept **Examples:** diff --git a/docs/home/6-build-reliable-ai-workflows/domain.md b/docs/home/6-build-reliable-ai-workflows/domain.md index 93b86d62c..6d79b0cd2 100644 --- a/docs/home/6-build-reliable-ai-workflows/domain.md +++ b/docs/home/6-build-reliable-ai-workflows/domain.md @@ -1,6 +1,6 @@ # Understanding Domains -A domain in Pipelex is a **semantic namespace** that organizes related concepts and pipes. It's declared at the top of every `.plx` file and serves as an identifier for grouping related functionality. +A domain in Pipelex is a **semantic namespace** that organizes related concepts and pipes. It's declared at the top of every `.mthds` file and serves as an identifier for grouping related functionality. ## What is a Domain? @@ -12,7 +12,7 @@ A domain is defined by three properties: ## Declaring a Domain -Every `.plx` file must declare its domain at the beginning: +Every `.mthds` file must declare its domain at the beginning: ```toml domain = "invoice_processing" @@ -68,14 +68,14 @@ This creates two concepts: The domain code prevents naming conflicts. Multiple bundles can define concepts with the same name if they're in different domains: ```toml -# finance.plx +# finance.mthds domain = "finance" [concept] Report = "A financial report" ``` ```toml -# marketing.plx +# marketing.mthds domain = "marketing" [concept] Report = "A marketing campaign report" @@ -85,17 +85,17 @@ Result: Two different concepts (`finance.Report` and `marketing.Report`) with no ### Multiple Bundles, Same Domain -Multiple `.plx` files can declare the same domain. They all contribute to that domain's namespace: +Multiple `.mthds` files can declare the same domain. They all contribute to that domain's namespace: ```toml -# finance_invoices.plx +# finance_invoices.mthds domain = "finance" [concept] Invoice = "..." ``` ```toml -# finance_payments.plx +# finance_payments.mthds domain = "finance" [concept] Payment = "..." 
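Individual pipes can also override the domain-level system prompt by declaring their own. A minimal sketch, assuming the pipe-level key mirrors the bundle-level `system_prompt` key from the bundle specification; the pipe name and prompt text are invented, and the pipe's other fields are omitted for brevity:

```toml
# Hypothetical bundle excerpt: a pipe-level system prompt overriding the domain default
domain = "finance"
description = "Financial document processing"
system_prompt = "You are an expert in financial document processing."

[pipe.flag_suspicious_invoice]
# ...other pipe fields (inputs, output, prompt, etc.) omitted for brevity...
system_prompt = "You are a meticulous fraud analyst. Be conservative and cite evidence."
```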
@@ -171,6 +171,6 @@ Individual pipes can override the domain system prompt by defining their own `sy ## Related Documentation - [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - How domains are declared in bundles -- [Kick off a Pipelex Workflow Project](./kick-off-a-pipelex-workflow-project.md) - Getting started +- [Kick off a Pipelex Method Project](./kick-off-a-pipelex-workflow-project.md) - Getting started - [Define Your Concepts](./concepts/define_your_concepts.md) - Creating concepts within domains - [Designing Pipelines](./pipes/index.md) - Building pipes within domains diff --git a/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md b/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md index f5e4d368b..2c0e8c32a 100644 --- a/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md +++ b/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md @@ -1,10 +1,10 @@ -# Kicking off a Pipelex Workflow Project +# Kicking off a Pipelex Method Project ## Creating Your First Pipeline -A pipeline in Pipelex is a collection of related concepts and pipes. Start by creating a PLX file in your project: +A pipeline in Pipelex is a collection of related concepts and pipes. Start by creating a MTHDS file in your project: -`tutorial.plx` +`tutorial.mthds` ```toml domain = "tutorial" description = "My first Pipelex library" @@ -48,20 +48,20 @@ See more about domains in [Understanding Domains](./domain.md) Consistent naming makes your pipeline code discoverable and maintainable: -### PLX Files -- Use lowercase with underscores: `legal_contracts.plx`, `customer_service.plx` -- Match the domain name when possible: domain "legal" → `legal.plx` -- For multi-word domains, use underscores: domain "customer_service" → `customer_service.plx` +### MTHDS Files +- Use lowercase with underscores: `legal_contracts.mthds`, `customer_service.mthds` +- Match the domain name when possible: domain "legal" → `legal.mthds` +- For multi-word domains, use underscores: domain "customer_service" → `customer_service.mthds` See more about pipelex bundle specification in [Pipelex Bundle Specification](./pipelex-bundle-specification.md) ### Python Model Files -- It is recommended to name structure files with a `_struct.py` suffix: `legal.plx` → `legal_struct.py` +- It is recommended to name structure files with a `_struct.py` suffix: `legal.mthds` → `legal_struct.py` - Pipelex will automatically discover and load structure classes from all Python files in your project (excluding common directories like `.venv`, `.git`, etc.) ## Project Structure -**Key principle:** Put `.plx` files where they belong in YOUR codebase. Pipelex automatically finds them. +**Key principle:** Put `.mthds` files where they belong in YOUR codebase. Pipelex automatically finds them. 
### Recommended Patterns @@ -72,11 +72,11 @@ your-project/ │ ├── finance/ │ │ ├── models.py │ │ ├── services.py -│ │ ├── invoices.plx # Pipeline with finance code +│ │ ├── invoices.mthds # Pipeline with finance code │ │ └── invoices_struct.py # Structure classes │ └── legal/ │ ├── models.py -│ ├── contracts.plx # Pipeline with legal code +│ ├── contracts.mthds # Pipeline with legal code │ └── contracts_struct.py ├── .pipelex/ # Config at repo root │ ├── pipelex.toml @@ -89,9 +89,9 @@ your-project/ your-project/ ├── my_project/ │ ├── pipelines/ # All pipelines together -│ │ ├── finance.plx +│ │ ├── finance.mthds │ │ ├── finance_struct.py -│ │ ├── legal.plx +│ │ ├── legal.mthds │ │ └── legal_struct.py │ └── core/ │ └── (your code) @@ -102,7 +102,7 @@ your-project/ ``` your-project/ ├── my_project/ -│ ├── invoice_pipeline.plx +│ ├── invoice_pipeline.mthds │ ├── invoice_struct.py │ └── main.py └── .pipelex/ @@ -110,7 +110,7 @@ your-project/ ### Key Points -- **Flexible placement**: `.plx` files work anywhere in your project +- **Flexible placement**: `.mthds` files work anywhere in your project - **Automatic discovery**: Pipelex scans and finds them automatically - **Configuration location**: `.pipelex/` stays at repository root - **Naming convention**: Use `_struct.py` suffix for structure files diff --git a/docs/home/6-build-reliable-ai-workflows/libraries.md b/docs/home/6-build-reliable-ai-workflows/libraries.md index 87d980035..f072a0ce9 100644 --- a/docs/home/6-build-reliable-ai-workflows/libraries.md +++ b/docs/home/6-build-reliable-ai-workflows/libraries.md @@ -10,7 +10,7 @@ A Library is composed of three core components: - **ConceptLibrary**: Manages all concept definitions across domains - **PipeLibrary**: Manages all pipe definitions -These three components together form what we call a **Pipelex Bundle** (the content you define in `.plx` files). Learn more about bundle structure and syntax in the [Pipelex Bundle Specification](./pipelex-bundle-specification.md). +These three components together form what we call a **Pipelex Bundle** (the content you define in `.mthds` files). Learn more about bundle structure and syntax in the [Pipelex Bundle Specification](./pipelex-bundle-specification.md). 
## Understanding Library Scope @@ -18,7 +18,7 @@ When you execute pipelines using `execute_pipeline` or `start_pipeline`, a libra - Contains the pipes and concepts available for execution - Provides isolation between different pipeline runs when using different library IDs -- Can be loaded from local directories or from PLX content strings +- Can be loaded from local directories or from MTHDS content strings ## Uniqueness Rules @@ -41,7 +41,7 @@ Libraries enforce specific uniqueness constraints to maintain consistency: Currently, all libraries are **local**, meaning they are loaded from: - Directories on your filesystem (using `library_dirs` parameter) -- PLX content strings (using `plx_content` parameter) +- MTHDS content strings (using `plx_content` parameter) - The current working directory (default behavior) ```python @@ -90,7 +90,7 @@ The library is populated based on the parameters you provide: **Option A: Loading from directories** ```python -# Loads all .plx files from specified directories +# Loads all .mthds files from specified directories pipe_output = await execute_pipeline( pipe_code="my_pipe", library_dirs=["./pipelines"], @@ -98,10 +98,10 @@ pipe_output = await execute_pipeline( ) ``` -**Option B: Loading from PLX content** +**Option B: Loading from MTHDS content** ```python -# Loads only the provided PLX content +# Loads only the provided MTHDS content plx_content = """ domain = "marketing" @@ -165,12 +165,12 @@ pipe_output = await execute_pipeline( ) ``` -### 2. Use PLX Content for Dynamic Pipelines +### 2. Use MTHDS Content for Dynamic Pipelines When generating or modifying pipelines dynamically, use `plx_content`: ```python -# Generate PLX content dynamically +# Generate MTHDS content dynamically plx_content = generate_custom_pipeline(user_requirements) pipe_output = await execute_pipeline( @@ -208,7 +208,7 @@ output2 = await execute_pipeline( ## Related Documentation - [Executing Pipelines](pipes/executing-pipelines.md) - Learn how to execute pipelines with different library configurations -- [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - Understand the structure of PLX files +- [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - Understand the structure of MTHDS files - [Domains](./domain.md) - Learn about organizing pipes into domains - [Concepts](./concepts/define_your_concepts.md) - Understand how concepts work within libraries diff --git a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md index 364cbce44..65c9e1263 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md +++ b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md @@ -3,7 +3,7 @@ Pipelex provides powerful tools to automatically generate complete, working pipelines from natural language requirements. This feature leverages AI to translate your ideas into fully functional pipeline code, dramatically speeding up development. !!! tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the workflows yourself, following our [documentation guide](./kick-off-a-pipelex-workflow-project.md). 
+ For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the methods yourself, following our [documentation guide](./kick-off-a-pipelex-workflow-project.md). ## Overview @@ -23,7 +23,7 @@ This command runs a validation/fix loop to ensure the generated pipeline is corr By default, the build command creates a numbered directory with three files: -1. **`bundle.plx`** - Your complete pipeline definition with domain, concepts, and pipes +1. **`bundle.mthds`** - Your complete pipeline definition with domain, concepts, and pipes 2. **`inputs.json`** - A pre-filled template showing the inputs your pipeline expects 3. **`run_{pipe_code}.py`** - A ready-to-run Python script you can customize and execute @@ -39,7 +39,7 @@ pipelex build pipe "Take a photo as input, and render the opposite of the photo" pipelex build pipe "Take a photo as input, and render the opposite of the photo" \ -o photo_inverter -# Single file only: creates results/photo_inverter_01.plx +# Single file only: creates results/photo_inverter_01.mthds pipelex build pipe "Take a photo as input, and render the opposite of the photo" \ -o photo_inverter --no-extras @@ -52,7 +52,7 @@ pipelex build pipe "Take a photo as input, and render the opposite of the photo" - `-o, --output-name`: Base name for the generated file or directory (without extension) - `--output-dir`: Directory where files will be generated (default: `results`) -- `--no-extras`: Skip generating `inputs.json` and runner, only generate the `.plx` bundle +- `--no-extras`: Skip generating `inputs.json` and runner, only generate the `.mthds` bundle - `--no-output`: Build the pipeline but don't save any files ## Quick Start Example @@ -97,7 +97,7 @@ When you run a build command, Pipelex automatically creates: - **Domain definition**: The namespace for your pipeline - **Concepts**: Structured data types for inputs and outputs - **Pipes**: The processing steps and LLM operations -- **Python structures**: When structured output is needed (saved alongside the `.plx` file with `_struct.py` suffix) +- **Python structures**: When structured output is needed (saved alongside the `.mthds` file with `_struct.py` suffix) All generated pipelines follow Pipelex best practices and conventions automatically. @@ -105,10 +105,10 @@ All generated pipelines follow Pipelex best practices and conventions automatica After generating your pipeline: -1. **Review the generated `.plx` file** to understand the structure +1. **Review the generated `.mthds` file** to understand the structure 2. **Test the pipeline** using the generated example code 3. **Iterate if needed** by modifying the natural language description and regenerating -4. **Customize** the pipeline by editing the `.plx` file directly for fine-tuning +4. 
**Customize** the pipeline by editing the `.mthds` file directly for fine-tuning ## How It Works @@ -169,7 +169,7 @@ For each pipe signature, generates the complete specification: Finally, the builder: - Names the domain based on your brief - Assembles all concepts and pipes into a complete bundle -- Generates the `.plx` file with proper syntax +- Generates the `.mthds` file with proper syntax - Creates Python structure files (`*_struct.py`) when needed - Validates the pipeline and fixes deterministic issues @@ -177,9 +177,9 @@ Finally, the builder: Want to see how the Pipe Builder works internally? Check out the source code: -- **Main pipeline**: [`pipelex/builder/builder.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/builder.plx) -- **Pipe design**: [`pipelex/builder/pipe/pipe_design.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/pipe/pipe_design.plx) -- **Concept building**: [`pipelex/builder/concept/concept.plx`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/concept/concept.plx) +- **Main pipeline**: [`pipelex/builder/builder.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/builder.mthds) +- **Pipe design**: [`pipelex/builder/pipe/pipe_design.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/pipe/pipe_design.mthds) +- **Concept building**: [`pipelex/builder/concept/concept.mthds`](https://github.com/pipelex/pipelex/tree/main/pipelex/builder/concept/concept.mthds) The Pipe Builder is a great example of a complex, multi-stage Pipelex pipeline in action. diff --git a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md index 695631852..66ee5643e 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md +++ b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md @@ -1,10 +1,10 @@ # Pipelex Bundle Specification -A **Pipelex bundle** is the fundamental unit of organization in Pipelex. It's a single `.plx` file that defines a cohesive set of concepts and pipes for a specific domain of work. +A **Pipelex bundle** is the fundamental unit of organization in Pipelex. It's a single `.mthds` file that defines a cohesive set of concepts and pipes for a specific domain of work. ## What is a Pipelex Bundle? -A Pipelex bundle (`.plx` file) brings together: +A Pipelex bundle (`.mthds` file) brings together: - **Domain declaration** - The semantic namespace for all concepts and pipes in this bundle - **Concepts** - The knowledge structures that flow through your pipes (optional) @@ -12,9 +12,9 @@ A Pipelex bundle (`.plx` file) brings together: Think of a bundle as a self-contained module that solves a specific problem domain. For example, you might have: -- `invoice_processing.plx` - Bundle for invoice extraction and validation -- `marketing.plx` - Bundle for generating marketing content -- `document_analysis.plx` - Bundle for analyzing documents +- `invoice_processing.mthds` - Bundle for invoice extraction and validation +- `marketing.mthds` - Bundle for generating marketing content +- `document_analysis.mthds` - Bundle for analyzing documents ## Bundle Structure @@ -50,7 +50,7 @@ Every bundle **must** declare a domain. 
Only the `domain` field is mandatory; al ```toml domain = "invoice_processing" description = "Tools for extracting and validating invoice data" -source = "path/to/invoice_processing.plx" +source = "path/to/invoice_processing.mthds" system_prompt = "You are an expert in financial document processing." main_pipe = "extract_and_validate_invoice" ``` @@ -242,5 +242,5 @@ prompt = "..." - [Understanding Domains](./domain.md) - Deep dive into domain organization - [Designing Pipelines](./pipes/index.md) - Learn how to design and compose pipes - [Define Your Concepts](./concepts/define_your_concepts.md) - Complete guide to concept definitions -- [Kick off a Pipelex Workflow Project](./kick-off-a-pipelex-workflow-project.md) - Start a new project +- [Kick off a Pipelex Method Project](./kick-off-a-pipelex-workflow-project.md) - Start a new project diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md index 5bdeda873..ec1e07c96 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md @@ -1,26 +1,26 @@ # Executing Pipelines -Once your pipes are defined in `.plx` files, you can execute them in multiple ways. +Once your pipes are defined in `.mthds` files, you can execute them in multiple ways. ## The Simplest Approach: Run a Bundle File -The easiest way to execute a pipeline is to point directly to your `.plx` bundle file. No library configuration needed. +The easiest way to execute a pipeline is to point directly to your `.mthds` bundle file. No library configuration needed. ### Using the CLI ```bash # Run the bundle's main_pipe -pipelex run path/to/my_bundle.plx +pipelex run path/to/my_bundle.mthds # Run a specific pipe from the bundle -pipelex run path/to/my_bundle.plx --pipe my_specific_pipe +pipelex run path/to/my_bundle.mthds --pipe my_specific_pipe # Run with inputs -pipelex run path/to/my_bundle.plx --inputs inputs.json +pipelex run path/to/my_bundle.mthds --inputs inputs.json ``` !!! tip "Preparing Inputs" - You can generate an input template with `pipelex build inputs path/to/my_bundle.plx`, which creates a `results/inputs.json` file with the required input structure. + You can generate an input template with `pipelex build inputs path/to/my_bundle.mthds`, which creates a `results/inputs.json` file with the required input structure. ### Using Python @@ -32,7 +32,7 @@ Pipelex.make() # Run the bundle's main_pipe pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", inputs={ "my_input": { "concept": "Text", @@ -43,14 +43,14 @@ pipe_output = await execute_pipeline( # Or run a specific pipe from the bundle pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", pipe_code="my_specific_pipe", inputs={...}, ) ``` !!! info "How `main_pipe` Works" - When you run a bundle without specifying a `pipe_code`, Pipelex executes the bundle's `main_pipe` (declared at the top of the `.plx` file). If no `main_pipe` is defined and no `pipe_code` is provided, an error is raised. + When you run a bundle without specifying a `pipe_code`, Pipelex executes the bundle's `main_pipe` (declared at the top of the `.mthds` file). If no `main_pipe` is defined and no `pipe_code` is provided, an error is raised. 
If you provide both `bundle_uri` and `pipe_code`, the explicit `pipe_code` takes priority over `main_pipe`. @@ -76,9 +76,9 @@ When using `execute_pipeline` or `start_pipeline`, you can control library behav - **`library_id`**: A unique identifier for the library instance. If not specified, it defaults to the `pipeline_run_id` (a unique ID generated for each pipeline execution). -- **`library_dirs`**: A list of directory paths to load pipe definitions from. **These directories must contain both your `.plx` files AND any Python files defining `StructuredContent` classes** (e.g., `*_struct.py` files). If not specified, Pipelex falls back to the `PIPELEXPATH` environment variable, then to the current working directory. +- **`library_dirs`**: A list of directory paths to load pipe definitions from. **These directories must contain both your `.mthds` files AND any Python files defining `StructuredContent` classes** (e.g., `*_struct.py` files). If not specified, Pipelex falls back to the `PIPELEXPATH` environment variable, then to the current working directory. -- **`plx_content`**: When provided, Pipelex will load only this PLX content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. +- **`plx_content`**: When provided, Pipelex will load only this MTHDS content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. !!! info "Python Structure Classes" If your concepts use Python `StructuredContent` classes instead of inline structures, those Python files must be in the directories specified by `library_dirs`. Pipelex auto-discovers and registers these classes during library loading. Learn more about [Python StructuredContent Classes](../concepts/python-classes.md). @@ -150,9 +150,9 @@ pipe_output = await execute_pipeline( !!! tip "Listing available pipes" Use the `pipelex show pipes` command to list all the pipes available in your project. -### Using PLX Content Directly +### Using MTHDS Content Directly -You can directly pass PLX content as a string to `execute_pipeline`, useful for dynamic pipeline execution without file-based definitions. +You can directly pass MTHDS content as a string to `execute_pipeline`, useful for dynamic pipeline execution without file-based definitions. ```python from pipelex.pipelex import Pipelex @@ -219,7 +219,7 @@ Pipelex.make() # Start the pipeline without waiting pipeline_run_id, task = await start_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", inputs={ "description": { "concept": "ProductDescription", diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/index.md index 08e6d0a4e..ad466e927 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/index.md @@ -1,19 +1,19 @@ # Designing Pipelines -In Pipelex, a pipeline is not just a rigid sequence of steps; it's a dynamic and intelligent workflow built by composing individual, reusable components called **pipes**. This approach allows you to break down complex AI tasks into manageable, testable, and reliable units. +In Pipelex, a pipeline is not just a rigid sequence of steps; it's a dynamic and intelligent method built by composing individual, reusable components called **pipes**. This approach allows you to break down complex AI tasks into manageable, testable, and reliable units. 
This guide provides an overview of how to design your pipelines. ## The Building Blocks: Pipes -A pipeline is composed of pipes. There are two fundamental types of pipes you will use to build your workflows: +A pipeline is composed of pipes. There are two fundamental types of pipes you will use to build your methods: * **[Pipe Operators](./pipe-operators/index.md)**: These are the "workers" of your pipeline. They perform concrete actions like calling an LLM (`PipeLLM`), extracting text from a document (`PipeExtract`), or running a Python function (`PipeFunc`). Each operator is a specialized tool designed for a specific task. -* **[Pipe Controllers](./pipe-controllers/index.md)**: These are the "managers" of your pipeline. They don't perform tasks themselves but orchestrate the execution flow of other pipes. They define the logic of your workflow, such as running pipes in sequence (`PipeSequence`), in parallel (`PipeParallel`), or based on a condition (`PipeCondition`). +* **[Pipe Controllers](./pipe-controllers/index.md)**: These are the "managers" of your pipeline. They don't perform tasks themselves but orchestrate the execution flow of other pipes. They define the logic of your method, such as running pipes in sequence (`PipeSequence`), in parallel (`PipeParallel`), or based on a condition (`PipeCondition`). -## Designing a Pipeline: Composition in PLX +## Designing a Pipeline: Composition in MTHDS -The most common way to design a pipeline is by defining and composing pipes in a `.plx` configuration file. This provides a clear, declarative way to see the structure of your workflow. +The most common way to design a pipeline is by defining and composing pipes in a `.mthds` configuration file. This provides a clear, declarative way to see the structure of your method. Each pipe, whether it's an operator or a controller, is defined in its own `[pipe.]` table. The `` becomes the unique identifier for that pipe. @@ -36,13 +36,13 @@ Each pipe, whether it's an operator or a controller, is defined in its own `[pip ❌ [pipe.GENERATE_TAGLINE] # All caps not allowed ``` -Let's look at a simple example. Imagine we want a workflow that: +Let's look at a simple example. Imagine we want a method that: 1. Takes a product description. 2. Generates a short, catchy marketing tagline for it. We can achieve this with a `PipeLLM` operator. -`marketing_pipeline.plx` +`marketing_pipeline.mthds` ```toml domain = "marketing" description = "Marketing content generation domain" @@ -75,7 +75,7 @@ The output concept is very important. Indeed, the output of your pipe will be co ### Understanding the Pipe Contract -Every pipe defines a **contract** through its `inputs` and `output` fields. This contract is fundamental to how Pipelex ensures reliability in your workflows: +Every pipe defines a **contract** through its `inputs` and `output` fields. This contract is fundamental to how Pipelex ensures reliability in your methods: * **`inputs`**: This dictionary defines the **mandatory and necessary** data that must be present in the [Working Memory](working-memory.md) before the pipe can execute. Each key in the dictionary becomes a variable name that you can reference in your pipe's logic (e.g., in prompts), and each value specifies the concept type that the data must conform to. If any required input is missing or doesn't match the expected concept, the pipeline will fail a clear error message. You can specify multiple inputs by using a list of concepts. 
For example, `inputs = { description = "ProductDescription", keywords = "Keyword[]" }` will require a `ProductDescription` and a list of `Keyword`s. (See more about [Understanding Multiplicity](./understanding-multiplicity.md) for details.) @@ -83,12 +83,12 @@ You can specify multiple inputs by using a list of concepts. For example, `input * **`output`**: This field declares what the pipe will produce. The output will always be an instance of the specified concept. The structure and type of the output depend on the concept definition (See more about concepts [here](../concepts/native-concepts.md)). * You can specify **multiple outputs** using bracket notation (e.g., `Keyword[]` for a variable list, or `Image[3]` for exactly 3 images) -### Multi-Step Workflows +### Multi-Step Methods -To create a multi-step workflow, you use a controller. The `PipeSequence` controller is the most common one. It executes a series of pipes in a specific order. +To create a multi-step method, you use a controller. The `PipeSequence` controller is the most common one. It executes a series of pipes in a specific order. -`marketing_pipeline.plx` +`marketing_pipeline.mthds` ```toml domain = "marketing" description = "Marketing content generation domain" diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md index 0d8496d5f..05c979cf9 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md @@ -16,9 +16,9 @@ This is the ideal controller for processing collections of documents, images, or ## Configuration -`PipeBatch` is configured in your pipeline's `.plx` file. +`PipeBatch` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ------------------ | ------------ | ------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md index beb94532e..d459e9827 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md @@ -13,9 +13,9 @@ The `PipeCondition` controller adds branching logic to your pipelines. It evalua ## Configuration -`PipeCondition` is configured in your pipeline's `.plx` file. +`PipeCondition` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ------------------------------ | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------ | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md index f3243188e..9c58bd5b3 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md @@ -16,9 +16,9 @@ You must use `add_each_output`, `combined_output`, or both. 
## Configuration -`PipeParallel` is configured in your pipeline's `.plx` file. +`PipeParallel` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ----------------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md index 0ca243123..8249dd9d8 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeSequence.md @@ -1,6 +1,6 @@ # PipeSequence -The `PipeSequence` controller is used to execute a series of pipes one after another. It is the fundamental building block for creating linear workflows where the output of one step becomes the input for the next. +The `PipeSequence` controller is used to execute a series of pipes one after another. It is the fundamental building block for creating linear methods where the output of one step becomes the input for the next. ## How it works @@ -12,9 +12,9 @@ A `PipeSequence` defines a list of `steps`. Each step calls another pipe and giv ## Configuration -`PipeSequence` is configured in your pipeline's `.plx` file. +`PipeSequence` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ---------- | --------------- | -------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md index 48b56f70b..0cb7e229a 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-controllers/index.md @@ -1,13 +1,13 @@ # Pipe Controllers -Pipe controllers are the orchestrators of a Pipelex pipeline. While [Pipe Operators](../pipe-operators/index.md) perform the work, pipe controllers define the workflow and manage the execution logic. They allow you to run other pipes in sequence, in parallel, or conditionally. +Pipe controllers are the orchestrators of a Pipelex pipeline. While [Pipe Operators](../pipe-operators/index.md) perform the work, pipe controllers define the method and manage the execution logic. They allow you to run other pipes in sequence, in parallel, or conditionally. ## Core Controllers Here are the primary pipe controllers available in Pipelex: - [**`PipeSequence`**](./PipeSequence.md): The most fundamental controller. It runs a series of pipes one after another, passing the results from one step to the next. -- [**`PipeParallel`**](./PipeParallel.md): Executes multiple independent pipes at the same time, significantly speeding up workflows where tasks don't depend on each other. +- [**`PipeParallel`**](./PipeParallel.md): Executes multiple independent pipes at the same time, significantly speeding up methods where tasks don't depend on each other. - [**`PipeBatch`**](./PipeBatch.md): Performs a "map" operation. It takes a list of items and runs the same pipe on every single item in parallel. 
- [**`PipeCondition`**](./PipeCondition.md): Adds branching logic (`if/else`) to your pipeline. It evaluates an expression and chooses which pipe to run next based on the result. diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md index 942d847ed..77e9cd8f2 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeExtract.md @@ -19,7 +19,7 @@ The `PageContent` object has the following structure: ## Configuration -`PipeExtract` is configured in your pipeline's `.plx` file. +`PipeExtract` is configured in your pipeline's `.mthds` file. ### OCR Models and Backend System @@ -37,7 +37,7 @@ Common OCR model handles: OCR presets are defined in your model deck configuration and can include parameters like `max_nb_images` and `image_min_size`. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md index 355bb4401..2d72f6d74 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeFunc.md @@ -81,13 +81,13 @@ async def concatenate_texts(working_memory: WorkingMemory) -> TextContent: pass ``` -Then use `function_name = "custom_concat"` in your `.plx` file. +Then use `function_name = "custom_concat"` in your `.mthds` file. ## Configuration -Once the function is registered, you can use it in your `.plx` file. +Once the function is registered, you can use it in your `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------- | ------ | --------------------------------------------------------------------------- | -------- | @@ -98,7 +98,7 @@ Once the function is registered, you can use it in your `.plx` file. ### Example -This PLX snippet shows how to use the `concatenate_texts` function defined above. It assumes two previous pipes have produced outputs named `text_a` and `text_b`. +This MTHDS snippet shows how to use the `concatenate_texts` function defined above. It assumes two previous pipes have produced outputs named `text_a` and `text_b`. ```toml [pipe.combine_them] diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md index db438bb6c..8c1e54b92 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeImgGen.md @@ -10,7 +10,7 @@ The pipe can be configured to generate a single image or a list of images. ## Configuration -`PipeImgGen` is configured in your pipeline's `.plx` file. +`PipeImgGen` is configured in your pipeline's `.mthds` file. ### The `prompt` Field is Required @@ -55,7 +55,7 @@ Common image generation model handles: Image generation presets are defined in your model deck configuration and can include parameters like `quality`, `guidance_scale`, and `safety_tolerance`. 
-### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | ----------------------- | --------------- | ----------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md index c558f9548..987d083b6 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md @@ -204,9 +204,9 @@ Analyze the document and explain how it relates to the context: $reference_doc ## Configuration -`PipeLLM` is configured in your pipeline's `.plx` file. +`PipeLLM` is configured in your pipeline's `.mthds` file. -### PLX Parameters +### MTHDS Parameters | Parameter | Type | Description | Required | | --------------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------- | diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md index 8a131c38a..9b1daeb01 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-operators/index.md @@ -2,7 +2,7 @@ Pipe operators are the fundamental building blocks in Pipelex, representing a single, focused task. They are the "verbs" of your pipeline that perform the actual work. -Each operator specializes in a specific kind of action, from interacting with Large Language Models to executing custom Python code. You combine these operators using [Pipe Controllers](../pipe-controllers/index.md) to create complex workflows. +Each operator specializes in a specific kind of action, from interacting with Large Language Models to executing custom Python code. You combine these operators using [Pipe Controllers](../pipe-controllers/index.md) to create complex methods. ## Core Operators diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md index 8fc5ad331..a8857dde9 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/pipe-output.md @@ -51,7 +51,7 @@ invoice = pipe_output.main_stuff_as(content_type=Invoice) ### Option 2: Inline Structure -If the output concept was defined with [inline structures](../concepts/inline-structures.md) directly in the `.plx` file, the generated class is not importable. Use the `PipeOutput` accessor methods instead: +If the output concept was defined with [inline structures](../concepts/inline-structures.md) directly in the `.mthds` file, the generated class is not importable. Use the `PipeOutput` accessor methods instead: ```python pipe_output = await execute_pipeline( @@ -176,6 +176,6 @@ This allows you to access intermediate results from multi-step pipelines. 
See [W - [Working Memory](working-memory.md) - Understanding data flow between pipes - [Executing Pipelines](executing-pipelines.md) - How to run pipelines -- [Inline Structures](../concepts/inline-structures.md) - Defining structures in `.plx` files +- [Inline Structures](../concepts/inline-structures.md) - Defining structures in `.mthds` files - [Python StructuredContent Classes](../concepts/python-classes.md) - Defining structures in Python diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md b/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md index 9936664fb..8ca6c3b9e 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/provide-inputs.md @@ -7,13 +7,13 @@ When running Pipelex pipelines, you need to provide input data that matches what The Pipelex CLI can generate a template JSON file with all the required inputs for your pipeline: ```bash -pipelex build inputs path/to/my_pipe.plx +pipelex build inputs path/to/my_pipe.mthds ``` This creates a `results/inputs.json` file with the structure needed for your pipeline. You can then fill in the values and use it with: ```bash -pipelex run path/to/my_pipe.plx --inputs results/inputs.json +pipelex run path/to/my_pipe.mthds --inputs results/inputs.json ``` See more about the options of the CLI [here](../../9-tools/cli/index.md). diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md b/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md index c813942a2..73cdb00c2 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/understanding-multiplicity.md @@ -1,6 +1,6 @@ # Understanding Multiplicity -Multiplicity in Pipelex defines how many items a particular stuff can comprise in a particular context. This applies to any of the pipe input variables and also to the output of the pipe. This idea is fundamental to building flexible AI workflows that can handle both single items and collections. +Multiplicity in Pipelex defines how many items a particular stuff can comprise in a particular context. This applies to any of the pipe input variables and also to the output of the pipe. This idea is fundamental to building flexible AI methods that can handle both single items and collections. This guide explains the philosophy behind multiplicity in Pipelex and how to use it effectively in your pipelines. @@ -23,7 +23,7 @@ Each of these definitions describes a single, coherent entity. 
The essence of wh ### Lists Are Circumstantial, Not Essential -The number of items you're working with is a circumstantial detail of your workflow, not part of the concept's identity: +The number of items you're working with is a circumstantial detail of your method, not part of the concept's identity: - A pipe that extracts keywords from text might find 3 keywords or 30—but each is still a `Keyword` - A pipe that generates product ideas might produce 5 ideas or 10—but each remains a `ProductIdea` @@ -355,7 +355,7 @@ Use variable input multiplicity when: - The pipe should handle batches of unknown size - You're aggregating or summarizing multiple items -- The workflow involves collecting items before processing +- The method involves collecting items before processing - You want maximum flexibility in how the pipe is called ### When to Use Fixed Input (Brackets with Number `[N]`) diff --git a/docs/home/7-configuration/config-technical/inference-backend-config.md b/docs/home/7-configuration/config-technical/inference-backend-config.md index eb5554566..9ac2f24f4 100644 --- a/docs/home/7-configuration/config-technical/inference-backend-config.md +++ b/docs/home/7-configuration/config-technical/inference-backend-config.md @@ -438,7 +438,7 @@ default-large-context-text = "gemini-2.5-flash" default-small = "gemini-2.5-flash-lite" ``` -When using aliases in `.plx` files or other configurations, prefix them with `@`: +When using aliases in `.mthds` files or other configurations, prefix them with `@`: ```toml model = "@best-claude" # References the best-claude alias @@ -468,7 +468,7 @@ vision-cheap = { model = "@default-small-vision", temperature = 0.5 } vision-diagram = { model = "@default-premium-vision", temperature = 0.3 } ``` -When using presets in `.plx` files, prefix them with `$`: +When using presets in `.mthds` files, prefix them with `$`: ```toml model = "$engineering-structured" # Uses preset for structured extraction @@ -486,7 +486,7 @@ Extract presets combine document extraction model selection with optimized param extract-testing = { model = "@default-extract-document", max_nb_images = 5, image_min_size = 50 } ``` -You can also use aliases directly in `.plx` files for document extraction: +You can also use aliases directly in `.mthds` files for document extraction: ```toml model = "@default-extract-document" # Uses default document extraction alias @@ -505,7 +505,7 @@ gen-image-fast = { model = "@default-small", quality = "low" } gen-image-high-quality = { model = "@default-premium", quality = "high" } ``` -When using image generation presets in `.plx` files, prefix them with `$`: +When using image generation presets in `.mthds` files, prefix them with `$`: ```toml model = "$gen-image" # Uses default image generation preset @@ -558,7 +558,7 @@ small-llm = ["gemini-2.5-flash-lite", "gpt-4o-mini", "claude-3-haiku"] document_extractor = ["azure-document-intelligence", "mistral-document-ai-2505"] ``` -When using waterfalls in `.plx` files, prefix them with `~`: +When using waterfalls in `.mthds` files, prefix them with `~`: ```toml model = "~premium-llm" # Will try claude-4.5-opus, then gemini-3.0-pro, then gpt-5.2 diff --git a/docs/home/7-configuration/config-technical/library-config.md b/docs/home/7-configuration/config-technical/library-config.md index 25ba0a450..38204d747 100644 --- a/docs/home/7-configuration/config-technical/library-config.md +++ b/docs/home/7-configuration/config-technical/library-config.md @@ -1,8 +1,8 @@ # Pipeline Discovery and Loading -When running pipelines, 
Pipelex needs to find your `.plx` bundle files. There are two approaches: +When running pipelines, Pipelex needs to find your `.mthds` bundle files. There are two approaches: -1. **Point to the bundle file directly** - The simplest option. Just pass the path to your `.plx` file. No configuration needed. +1. **Point to the bundle file directly** - The simplest option. Just pass the path to your `.mthds` file. No configuration needed. 2. **Configure library directories** - For larger projects. Pipelex scans directories to discover all bundles, letting you reference pipes by code. @@ -10,26 +10,26 @@ Most users should start with the first approach. ## The Simplest Way: Use the Bundle Path Directly -If you just want to run a pipe from a single `.plx` file, **you don't need any library configuration**. Simply point to your bundle file: +If you just want to run a pipe from a single `.mthds` file, **you don't need any library configuration**. Simply point to your bundle file: ```bash # CLI: run the bundle's main_pipe -pipelex run path/to/my_bundle.plx +pipelex run path/to/my_bundle.mthds # CLI: run a specific pipe from the bundle -pipelex run path/to/my_bundle.plx --pipe my_pipe +pipelex run path/to/my_bundle.mthds --pipe my_pipe ``` ```python # Python: run the bundle's main_pipe pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", inputs={...}, ) # Python: run a specific pipe from the bundle pipe_output = await execute_pipeline( - bundle_uri="path/to/my_bundle.plx", + bundle_uri="path/to/my_bundle.mthds", pipe_code="my_pipe", inputs={...}, ) @@ -38,7 +38,7 @@ pipe_output = await execute_pipeline( This is the recommended approach for newcomers and simple projects. Pipelex reads the file directly - no discovery needed. !!! tip "When to use library directories" - The library directory configuration below is useful when you have **multiple bundles across different directories** and want to reference pipes by code without specifying the bundle path each time. For most use cases, pointing to the `.plx` file directly is simpler. + The library directory configuration below is useful when you have **multiple bundles across different directories** and want to reference pipes by code without specifying the bundle path each time. For most use cases, pointing to the `.mthds` file directly is simpler. --- @@ -46,7 +46,7 @@ This is the recommended approach for newcomers and simple projects. Pipelex read When you initialize Pipelex with `Pipelex.make()`, the system: -1. **Scans your project directory** for all `.plx` files +1. **Scans your project directory** for all `.mthds` files 2. **Discovers Python structure classes** that inherit from `StructuredContent` 3. **Loads pipeline definitions** including domains, concepts, and pipes 4. **Registers custom functions** decorated with `@pipe_func()` @@ -55,7 +55,7 @@ All of this happens automatically - no configuration needed. ## Configuring Library Directories -When executing pipelines, Pipelex needs to know where to find your `.plx` files and Python structure classes. You can configure this using a **3-tier priority system** that gives you flexibility from global defaults to per-execution overrides. +When executing pipelines, Pipelex needs to know where to find your `.mthds` files and Python structure classes. You can configure this using a **3-tier priority system** that gives you flexibility from global defaults to per-execution overrides. 
### The 3-Tier Priority System @@ -119,7 +119,7 @@ pipelex run my_pipe -L /path/to/pipelines pipelex run my_pipe -L /path/to/shared_pipes -L /path/to/project_pipes # Combined with other options -pipelex run my_bundle.plx --inputs data.json -L /path/to/pipelines +pipelex run my_bundle.mthds --inputs data.json -L /path/to/pipelines # Available on multiple commands pipelex validate --all -L /path/to/pipelines/dir @@ -239,7 +239,7 @@ output = await execute_pipeline( 4. **Use empty list `[]` for isolated execution**: When you want to execute only from `plx_content` without loading any file-based definitions. -5. **Include structure class directories**: Remember that `library_dirs` must contain both `.plx` files AND Python files defining `StructuredContent` classes. +5. **Include structure class directories**: Remember that `library_dirs` must contain both `.mthds` files AND Python files defining `StructuredContent` classes. ## Excluded Directories @@ -255,11 +255,11 @@ To improve performance and avoid loading unnecessary files, Pipelex automaticall - `.env` - Environment files - `results` - Common output directory -Files in these directories will not be scanned, even if they contain `.plx` files or structure classes. +Files in these directories will not be scanned, even if they contain `.mthds` files or structure classes. ## Project Organization -**Golden rule:** Put `.plx` files where they make sense in YOUR project. Pipelex finds them automatically. +**Golden rule:** Put `.mthds` files where they make sense in YOUR project. Pipelex finds them automatically. ### Common Patterns @@ -273,11 +273,11 @@ your_project/ │ ├── finance/ │ │ ├── models.py │ │ ├── services.py -│ │ ├── invoices.plx # With finance code +│ │ ├── invoices.mthds # With finance code │ │ └── invoices_struct.py │ └── legal/ │ ├── models.py -│ ├── contracts.plx # With legal code +│ ├── contracts.mthds # With legal code │ └── contracts_struct.py ├── .pipelex/ └── requirements.txt @@ -297,9 +297,9 @@ Group all pipelines in one place: your_project/ ├── my_project/ │ ├── pipelines/ # All pipelines here -│ │ ├── finance.plx +│ │ ├── finance.mthds │ │ ├── finance_struct.py -│ │ ├── legal.plx +│ │ ├── legal.mthds │ │ └── legal_struct.py │ └── core/ └── .pipelex/ @@ -321,10 +321,10 @@ your_project/ ├── my_project/ │ ├── features/ │ │ ├── document_processing/ -│ │ │ ├── extract.plx +│ │ │ ├── extract.mthds │ │ │ └── extract_struct.py │ │ └── image_generation/ -│ │ ├── generate.plx +│ │ ├── generate.mthds │ │ └── generate_struct.py │ └── main.py └── .pipelex/ @@ -337,11 +337,11 @@ your_project/ ├── my_project/ │ ├── finance/ │ │ ├── pipelines/ -│ │ │ └── invoices.plx +│ │ │ └── invoices.mthds │ │ └── invoice_struct.py │ ├── legal/ │ │ ├── pipelines/ -│ │ │ └── contracts.plx +│ │ │ └── contracts.mthds │ │ └── contract_struct.py │ └── main.py └── .pipelex/ @@ -352,7 +352,7 @@ your_project/ ``` your_project/ ├── my_project/ -│ ├── invoice_processing.plx +│ ├── invoice_processing.mthds │ ├── invoice_struct.py │ └── main.py └── .pipelex/ @@ -364,14 +364,14 @@ Pipelex loads your pipelines in a specific order to ensure dependencies are reso ### 1. Domain Loading -- Loads domain definitions from all `.plx` files +- Loads domain definitions from all `.mthds` files - Each domain must be defined exactly once - Supports system prompts and structure templates per domain ### 2. Concept Loading - Loads native concepts (Text, Image, PDF, etc.) 
-- Loads custom concepts from `.plx` files +- Loads custom concepts from `.mthds` files - Validates concept definitions and relationships - Links concepts to Python structure classes by name @@ -383,7 +383,7 @@ Pipelex loads your pipelines in a specific order to ensure dependencies are reso ### 4. Pipe Loading -- Loads pipe definitions from `.plx` files +- Loads pipe definitions from `.mthds` files - Validates pipe configurations - Links pipes with their respective domains - Resolves input/output concept references @@ -441,9 +441,9 @@ pipelex show pipe YOUR_PIPE_CODE ### 1. Organization -- Keep related concepts and pipes in the same `.plx` file +- Keep related concepts and pipes in the same `.mthds` file - Use meaningful domain codes that reflect functionality -- Match Python file names with PLX file names (`finance.plx` → `finance.py`) +- Match Python file names with MTHDS file names (`finance.mthds` → `finance.py`) - Group complex pipelines using subdirectories ### 2. Structure Classes @@ -452,7 +452,7 @@ pipelex show pipe YOUR_PIPE_CODE - Name classes to match concept names exactly - Use `_struct.py` suffix for files containing structure classes (e.g., `finance_struct.py`) - Inherit from `StructuredContent` or its subclasses -- Place structure class files near their corresponding `.plx` files +- Place structure class files near their corresponding `.mthds` files - **Keep modules clean**: Avoid module-level code that executes on import (Pipelex imports modules during auto-discovery) ### 3. Custom Functions @@ -474,11 +474,11 @@ pipelex show pipe YOUR_PIPE_CODE ### Pipelines Not Found -**Problem:** Pipelex doesn't find your `.plx` files. +**Problem:** Pipelex doesn't find your `.mthds` files. **Solutions:** -1. Ensure files have the `.plx` extension +1. Ensure files have the `.mthds` extension 2. Check that files are not in excluded directories 3. Verify file permissions allow reading 4. 
Run `pipelex show pipes` to see what was discovered diff --git a/docs/home/9-tools/cli/build/inputs.md b/docs/home/9-tools/cli/build/inputs.md index f476d4b61..c80ccccfc 100644 --- a/docs/home/9-tools/cli/build/inputs.md +++ b/docs/home/9-tools/cli/build/inputs.md @@ -10,7 +10,7 @@ pipelex build inputs [OPTIONS] **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path (`.plx`) - auto-detected +- `TARGET` - Either a pipe code or a bundle file path (`.mthds`) - auto-detected **Options:** @@ -23,13 +23,13 @@ pipelex build inputs [OPTIONS] **Generate inputs from a bundle (uses main_pipe):** ```bash -pipelex build inputs my_bundle.plx +pipelex build inputs my_bundle.mthds ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build inputs my_bundle.plx --pipe my_pipe +pipelex build inputs my_bundle.mthds --pipe my_pipe ``` **Generate inputs for a pipe using a library directory:** @@ -41,7 +41,7 @@ pipelex build inputs my_domain.my_pipe -L ./my_library/ **Custom output path:** ```bash -pipelex build inputs my_bundle.plx --output custom_inputs.json +pipelex build inputs my_bundle.mthds --output custom_inputs.json ``` ## Output Format diff --git a/docs/home/9-tools/cli/build/output.md b/docs/home/9-tools/cli/build/output.md index 36a90a05f..4945db535 100644 --- a/docs/home/9-tools/cli/build/output.md +++ b/docs/home/9-tools/cli/build/output.md @@ -10,7 +10,7 @@ pipelex build output [OPTIONS] **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path (`.plx`) - auto-detected +- `TARGET` - Either a pipe code or a bundle file path (`.mthds`) - auto-detected **Options:** @@ -27,19 +27,19 @@ pipelex build output [OPTIONS] **Generate output from a bundle (uses main_pipe):** ```bash -pipelex build output my_bundle.plx +pipelex build output my_bundle.mthds ``` **Generate JSON Schema for TypeScript/Zod integration:** ```bash -pipelex build output my_bundle.plx --format schema +pipelex build output my_bundle.mthds --format schema ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build output my_bundle.plx --pipe my_pipe +pipelex build output my_bundle.mthds --pipe my_pipe ``` **Generate output for a pipe using a library directory:** @@ -51,7 +51,7 @@ pipelex build output my_domain.my_pipe -L ./my_library/ **Custom output path:** ```bash -pipelex build output my_bundle.plx --output expected_output.json +pipelex build output my_bundle.mthds --output expected_output.json ``` ## Output Formats diff --git a/docs/home/9-tools/cli/build/pipe.md b/docs/home/9-tools/cli/build/pipe.md index 3914d6382..9fc2679e6 100644 --- a/docs/home/9-tools/cli/build/pipe.md +++ b/docs/home/9-tools/cli/build/pipe.md @@ -6,7 +6,7 @@ !!! tip "Built with Pipelex" The Pipe Builder is itself a Pipelex pipeline! This showcases the power of Pipelex: a tool that builds pipelines... using a pipeline. -The Pipe Builder is an AI-powered tool that generates Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder translates your requirements into a working `.plx` file. +The Pipe Builder is an AI-powered tool that generates Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder translates your requirements into a working `.mthds` file. !!! info "Deep Dive" Want to understand how the Pipe Builder works under the hood? See [Pipe Builder Deep Dive](../../pipe-builder.md) for the full explanation of its multi-step generation process. 
@@ -26,7 +26,7 @@ pipelex build pipe [OPTIONS] - `--output-name`, `-o` - Base name for the generated file or directory (without extension) - `--output-dir` - Directory where files will be generated - `--no-output` - Skip saving the pipeline to file (useful for testing) -- `--no-extras` - Skip generating `inputs.json` and `runner.py`, only generate the PLX file +- `--no-extras` - Skip generating `inputs.json` and `runner.py`, only generate the MTHDS file - `--builder-pipe` - Builder pipe to use for generating the pipeline (default: `pipe_builder`) - `--graph` / `--no-graph` - Generate execution graphs for both build process and built pipeline - `--graph-full-data` / `--graph-no-data` - Include or exclude full serialized data in graphs (requires `--graph`) @@ -37,7 +37,7 @@ The resulting pipeline will be saved in a folder (e.g., `pipeline_01/`) containi | File | Description | |------|-------------| -| `bundle.plx` | The pipeline definition | +| `bundle.mthds` | The pipeline definition | | `inputs.json` | Template for pipeline inputs | | `run_{pipe_code}.py` | Python script to run the pipeline | | `structures/` | Generated Pydantic models for your concepts | @@ -45,7 +45,7 @@ The resulting pipeline will be saved in a folder (e.g., `pipeline_01/`) containi | `bundle_view.svg` | SVG visualization of the build process and plan | | `__init__.py` | Python package init file | -The HTML and SVG files provide a visual representation of the resulting workflow. +The HTML and SVG files provide a visual representation of the resulting method. ## Examples @@ -67,7 +67,7 @@ pipelex build pipe "Extract data from invoices" -o invoice_extractor pipelex build pipe "Analyze customer feedback" --output-dir ./pipelines/ ``` -**Generate only the PLX file (no extras):** +**Generate only the MTHDS file (no extras):** ```bash pipelex build pipe "Summarize documents" --no-extras @@ -87,7 +87,7 @@ pipelex build pipe "Take a CV in a PDF file and a Job offer text, and analyze if pipelex build pipe "Extract structured data from invoice images" ``` -**Multi-step Workflows:** +**Multi-step Methods:** ```bash pipelex build pipe "Given an RFP PDF, build a compliance matrix" @@ -111,12 +111,12 @@ The Pipe Builder is in active development and currently: After generating your pipeline: -1. **Validate it**: `pipelex validate your_pipe.plx` - See [Validate Commands](../validate.md) -2. **Run it**: `pipelex run your_pipe.plx` - See [Run Command](../run.md) -3. **Generate a runner**: `pipelex build runner your_pipe.plx` - See [Build Runner](runner.md) +1. **Validate it**: `pipelex validate your_pipe.mthds` - See [Validate Commands](../validate.md) +2. **Run it**: `pipelex run your_pipe.mthds` - See [Run Command](../run.md) +3. **Generate a runner**: `pipelex build runner your_pipe.mthds` - See [Build Runner](runner.md) 4. **Generate structures**: `pipelex build structures ./` - See [Build Structures](structures.md) -5. **Generate input template**: `pipelex build inputs your_pipe.plx` - See [Build Inputs](inputs.md) -6. **View output structure**: `pipelex build output your_pipe.plx` - See [Build Output](output.md) +5. **Generate input template**: `pipelex build inputs your_pipe.mthds` - See [Build Inputs](inputs.md) +6. 
**View output structure**: `pipelex build output your_pipe.mthds` - See [Build Output](output.md) ## Related Documentation diff --git a/docs/home/9-tools/cli/build/runner.md b/docs/home/9-tools/cli/build/runner.md index fcede599d..0b99d2c03 100644 --- a/docs/home/9-tools/cli/build/runner.md +++ b/docs/home/9-tools/cli/build/runner.md @@ -10,11 +10,11 @@ pipelex build runner [OPTIONS] **Arguments:** -- `TARGET` - Bundle file path (`.plx`) +- `TARGET` - Bundle file path (`.mthds`) **Options:** -- `--pipe` - Pipe code to use (optional if the `.plx` declares a `main_pipe`) +- `--pipe` - Pipe code to use (optional if the `.mthds` declares a `main_pipe`) - `--output`, `-o` - Path to save the generated Python file (defaults to target's directory) - `--library-dirs`, `-L` - Directories to search for pipe definitions. Can be specified multiple times. @@ -23,25 +23,25 @@ pipelex build runner [OPTIONS] **Generate runner from a bundle (uses main_pipe):** ```bash -pipelex build runner my_bundle.plx +pipelex build runner my_bundle.mthds ``` **Specify which pipe to use from a bundle:** ```bash -pipelex build runner my_bundle.plx --pipe my_pipe +pipelex build runner my_bundle.mthds --pipe my_pipe ``` **With additional library directories:** ```bash -pipelex build runner my_bundle.plx -L ./shared_pipes/ -L ./common/ +pipelex build runner my_bundle.mthds -L ./shared_pipes/ -L ./common/ ``` **Custom output path:** ```bash -pipelex build runner my_bundle.plx --output custom_runner.py +pipelex build runner my_bundle.mthds --output custom_runner.py ``` ## What Gets Generated diff --git a/docs/home/9-tools/cli/build/structures.md b/docs/home/9-tools/cli/build/structures.md index dcb6611e8..60551cc20 100644 --- a/docs/home/9-tools/cli/build/structures.md +++ b/docs/home/9-tools/cli/build/structures.md @@ -10,7 +10,7 @@ pipelex build structures [OPTIONS] **Arguments:** -- `TARGET` - Either a library directory containing `.plx` files, or a specific `.plx` file +- `TARGET` - Either a library directory containing `.mthds` files, or a specific `.mthds` file **Options:** @@ -27,7 +27,7 @@ pipelex build structures ./my_pipelines/ **Generate structures from a specific bundle file:** ```bash -pipelex build structures ./my_pipeline/bundle.plx +pipelex build structures ./my_pipeline/bundle.mthds ``` **Generate structures to a specific output directory:** @@ -55,7 +55,7 @@ Now you have your structures as Python code: ## Example Output -For a concept defined in a `.plx` file like: +For a concept defined in a `.mthds` file like: ```toml [concept.CandidateProfile] diff --git a/docs/home/9-tools/cli/index.md b/docs/home/9-tools/cli/index.md index 485ebed79..9112a69b1 100644 --- a/docs/home/9-tools/cli/index.md +++ b/docs/home/9-tools/cli/index.md @@ -23,8 +23,8 @@ The Pipelex CLI is organized into several command groups: 2. **Development Workflow** - - Write or generate pipelines in `.plx` files - - Validate with `pipelex validate your_pipe_code` or `pipelex validate your_bundle.plx` during development + - Write or generate pipelines in `.mthds` files + - Validate with `pipelex validate your_pipe_code` or `pipelex validate your_bundle.mthds` during development - Run `pipelex validate --all` before committing changes 3. **Running Pipelines** diff --git a/docs/home/9-tools/cli/run.md b/docs/home/9-tools/cli/run.md index 5e3ae8616..e15978c9b 100644 --- a/docs/home/9-tools/cli/run.md +++ b/docs/home/9-tools/cli/run.md @@ -8,11 +8,11 @@ Execute a pipeline with optional inputs and outputs. 
pipelex run [TARGET] [OPTIONS] ``` -Executes a pipeline, either from a standalone bundle (.plx) file or from your project's pipe library. +Executes a pipeline, either from a standalone bundle (.mthds) file or from your project's pipe library. **Arguments:** -- `TARGET` - Either a pipe code or a bundle file path, auto-detected according to presence of the .plx file extension +- `TARGET` - Either a pipe code or a bundle file path, auto-detected according to presence of the .mthds file extension **Options:** @@ -22,7 +22,7 @@ Executes a pipeline, either from a standalone bundle (.plx) file or from your pr - `--output`, `-o` - Path to save output JSON (defaults to `results/run_{pipe_code}.json`) - `--no-output` - Skip saving output to file - `--no-pretty-print` - Skip pretty printing the main output -- `--library-dir`, `-L` - Directory to search for pipe definitions (.plx files). Can be specified multiple times. +- `--library-dir`, `-L` - Directory to search for pipe definitions (.mthds files). Can be specified multiple times. **Examples:** @@ -34,10 +34,10 @@ pipelex run hello_world pipelex run write_weekly_report --inputs weekly_report_data.json # Run a bundle file (uses its main_pipe) -pipelex run my_bundle.plx +pipelex run my_bundle.mthds # Run a specific pipe from a bundle -pipelex run my_bundle.plx --pipe extract_invoice +pipelex run my_bundle.mthds --pipe extract_invoice # Run with explicit options pipelex run --pipe hello_world --output my_output.json diff --git a/docs/home/9-tools/cli/show.md b/docs/home/9-tools/cli/show.md index 35a5819f3..1b27f3c53 100644 --- a/docs/home/9-tools/cli/show.md +++ b/docs/home/9-tools/cli/show.md @@ -38,7 +38,7 @@ pipelex show pipes This includes: - Internal Pipelex pipes (like the pipe builder) -- Pipes from your project's `.plx` files +- Pipes from your project's `.mthds` files - Pipes that are part of imported packages ## Show Pipe Definition diff --git a/docs/home/9-tools/cli/validate.md b/docs/home/9-tools/cli/validate.md index f710657cd..993131533 100644 --- a/docs/home/9-tools/cli/validate.md +++ b/docs/home/9-tools/cli/validate.md @@ -65,33 +65,33 @@ pipelex validate my_pipe -L ./pipelines ## Validate Bundle ```bash -pipelex validate BUNDLE_FILE.plx -pipelex validate --bundle BUNDLE_FILE.plx +pipelex validate BUNDLE_FILE.mthds +pipelex validate --bundle BUNDLE_FILE.mthds ``` -Validates all pipes defined in a bundle file. The command automatically detects `.plx` files as bundles. +Validates all pipes defined in a bundle file. The command automatically detects `.mthds` files as bundles. **Arguments:** -- `BUNDLE_FILE.plx` - Path to the bundle file (auto-detected by `.plx` extension) +- `BUNDLE_FILE.mthds` - Path to the bundle file (auto-detected by `.mthds` extension) **Options:** -- `--bundle BUNDLE_FILE.plx` - Explicitly specify the bundle file path +- `--bundle BUNDLE_FILE.mthds` - Explicitly specify the bundle file path - `--library-dir`, `-L` - Directory to search for additional pipe definitions. Can be specified multiple times. **Examples:** ```bash # Validate a bundle (auto-detected) -pipelex validate my_pipeline.plx -pipelex validate pipelines/invoice_processor.plx +pipelex validate my_pipeline.mthds +pipelex validate pipelines/invoice_processor.mthds # Validate a bundle (explicit option) -pipelex validate --bundle my_pipeline.plx +pipelex validate --bundle my_pipeline.mthds # Validate a bundle with additional library directories -pipelex validate my_bundle.plx -L ./shared_pipes +pipelex validate my_bundle.mthds -L ./shared_pipes ``` !!! 
note @@ -100,22 +100,22 @@ pipelex validate my_bundle.plx -L ./shared_pipes ## Validate Specific Pipe in Bundle ```bash -pipelex validate --bundle BUNDLE_FILE.plx --pipe PIPE_CODE +pipelex validate --bundle BUNDLE_FILE.mthds --pipe PIPE_CODE ``` Validates all pipes in a bundle, while ensuring a specific pipe exists in that bundle. The entire bundle is validated, not just the specified pipe. **Options:** -- `--bundle BUNDLE_FILE.plx` - Path to the bundle file +- `--bundle BUNDLE_FILE.mthds` - Path to the bundle file - `--pipe PIPE_CODE` - Pipe code that must exist in the bundle **Examples:** ```bash # Validate bundle and ensure specific pipe exists in it -pipelex validate --bundle my_pipeline.plx --pipe extract_invoice -pipelex validate --bundle invoice_processor.plx --pipe validate_amounts +pipelex validate --bundle my_pipeline.mthds --pipe extract_invoice +pipelex validate --bundle invoice_processor.mthds --pipe validate_amounts ``` !!! important "Bundle Validation Behavior" @@ -125,7 +125,7 @@ pipelex validate --bundle invoice_processor.plx --pipe validate_amounts All validation commands check: -- Syntax correctness of `.plx` files +- Syntax correctness of `.mthds` files - Concept and pipe definitions are valid - Input/output connections are correct - All referenced pipes and concepts exist diff --git a/docs/home/9-tools/pipe-builder.md b/docs/home/9-tools/pipe-builder.md index e7ab37a5b..f4e41eded 100644 --- a/docs/home/9-tools/pipe-builder.md +++ b/docs/home/9-tools/pipe-builder.md @@ -3,7 +3,7 @@ !!! warning "Beta Feature" The Pipe Builder is currently in beta and progressing fast. Expect frequent improvements and changes. -The Pipe Builder is an AI-powered tool that generates complete Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder creates a production-ready `.plx` file with concepts, pipes, and all the necessary structure. +The Pipe Builder is an AI-powered tool that generates complete Pipelex pipelines from natural language descriptions. Describe what you want to achieve, and the builder creates a production-ready `.mthds` file with concepts, pipes, and all the necessary structure. ## What It Does @@ -13,7 +13,7 @@ The Pipe Builder takes a brief description like: And generates: -- **Domain concepts** - Data structures for your workflow (e.g., `CVAnalysis`, `InterviewQuestion`) +- **Domain concepts** - Data structures for your method (e.g., `CVAnalysis`, `InterviewQuestion`) - **Pipe operators** - LLM calls, extractions, image generation steps - **Pipe controllers** - Sequences, batches, parallel branches, conditions to orchestrate the flow - **A complete bundle** - Ready to validate and run @@ -89,7 +89,7 @@ Finally, everything is assembled into a complete Pipelex bundle: ## The Builder Pipeline -The Pipe Builder is defined in [`pipelex/builder/builder.plx`](https://github.com/Pipelex/pipelex/blob/main/pipelex/builder/builder.plx). The main orchestrator is a `PipeSequence` called `pipe_builder` that chains together: +The Pipe Builder is defined in [`pipelex/builder/builder.mthds`](https://github.com/Pipelex/pipelex/blob/main/pipelex/builder/builder.mthds). 
The main orchestrator is a `PipeSequence` called `pipe_builder` that chains together: ``` draft_the_plan → draft_the_concepts → structure_concepts → draft_flow → review_flow → design_pipe_signatures → write_bundle_header → detail_pipe_spec (batched) → assemble_pipelex_bundle_spec diff --git a/docs/index.md b/docs/index.md index 130ba76b0..de022e08f 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,9 +6,9 @@ title: "What is Pipelex?" # What is Pipelex? -Pipelex is an open-source language that enables agents to build and run **repeatable AI workflows**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. +Pipelex is an open-source language that enables agents to build and run **repeatable AI methods**. Instead of cramming everything into one complex prompt, you break tasks into focused steps, each pipe handling one clear transformation. -Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.plx` files) is simple and human-readable, even for non-technical users. +Each pipe processes information using **Concepts** (typing with meaning) to ensure your pipelines make sense. The Pipelex language (`.mthds` files) is simple and human-readable, even for non-technical users. Each step can be structured and validated, so you benefit from the reliability of software, and the intelligence of AI. @@ -16,20 +16,20 @@ Each step can be structured and validated, so you benefit from the reliability o ## Key Features -### 🔄 Repeatable AI Workflows -Build workflows that produce consistent, reliable results every time they run. +### 🔄 Repeatable AI Methods +Build methods that produce consistent, reliable results every time they run. ### 🧩 Concept-Driven Design Use semantic typing (Concepts) to ensure each step of your pipeline makes sense and connects logically. ### 📝 Human-Readable Language -Write workflows in `.plx` files that are easy to read, edit, and maintain—even for non-developers. +Write methods in `.mthds` files that are easy to read, edit, and maintain—even for non-developers. ### 🤖 AI-Assisted Development -Generate and iterate on workflows using natural language with your favorite AI coding assistant. +Generate and iterate on methods using natural language with your favorite AI coding assistant. ### 🔧 Production-Ready -Validate, test, and deploy AI workflows with the same confidence as traditional software. +Validate, test, and deploy AI methods with the same confidence as traditional software. --- @@ -46,7 +46,7 @@ Pipelex solves these problems by: - **Breaking down complexity** into focused, manageable steps - **Ensuring consistency** through structured validation -- **Enabling iteration** with clear, editable workflows +- **Enabling iteration** with clear, editable methods - **Facilitating collaboration** with human-readable syntax --- diff --git a/docs/under-the-hood/architecture-overview.md b/docs/under-the-hood/architecture-overview.md index fa78e43df..7919d22bb 100644 --- a/docs/under-the-hood/architecture-overview.md +++ b/docs/under-the-hood/architecture-overview.md @@ -4,7 +4,7 @@ title: "Architecture Overview" # Architecture Overview -Pipelex is a Python framework for building and running **repeatable AI workflows** using a declarative language (`.plx` files). +Pipelex is a Python framework for building and running **repeatable AI methods** using a declarative language (`.mthds` files). 
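For readers landing on the architecture overview first, a rough sketch of such a declarative `.mthds` bundle is shown below. The domain, concept, and pipe names are invented, and every field name other than `domain`, `main_pipe`, `type`, and `model` is an assumption to verify against the language specification.

```toml
# hypothetical bundle — names are illustrative, several field names assumed
domain = "newsletter"
main_pipe = "write_tweet"

[concept.TweetDraft]
definition = "A short draft tweet summarizing an article"   # assumed field name

[pipe.write_tweet]
type = "PipeLLM"
model = "$deep-analysis"                      # "$" references a model preset
output = "TweetDraft"                         # assumed field name
prompt = "Summarize the article as a tweet"   # assumed field name
```

The point of the sketch is the layering: the bundle declares what each step produces (a `TweetDraft`), while the lower layers of the framework decide how the model call is actually made.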
--- @@ -51,7 +51,7 @@ Located in [`pipelex/core/`](https://github.com/Pipelex/pipelex/tree/main/pipele - **Concepts** - Semantic types with meaning (not just data types) - **Stuffs** - Knowledge objects combining a concept type with content - **Working Memory** - Runtime storage for data flowing through pipes -- **Bundles** - Complete pipeline definitions loaded from `.plx` files +- **Bundles** - Complete pipeline definitions loaded from `.mthds` files --- @@ -93,9 +93,9 @@ Each plugin translates Pipelex's unified interface into provider-specific API ca ```mermaid flowchart TB - subgraph PLX[".plx Pipeline Files"] + subgraph MTHDS[".mthds Pipeline Files"] direction LR - D1["Declarative workflow definitions"] + D1["Declarative method definitions"] end subgraph HL["HIGH-LEVEL: Business Logic"] @@ -145,7 +145,7 @@ flowchart TB A1["External Services"] end - PLX --> HL + MTHDS --> HL HL --> LL LL --> API ``` diff --git a/docs/under-the-hood/index.md b/docs/under-the-hood/index.md index 4c850076e..e3027ec11 100644 --- a/docs/under-the-hood/index.md +++ b/docs/under-the-hood/index.md @@ -19,7 +19,7 @@ Welcome to the technical deep-dives of Pipelex. This section is for contributors - **Module Deep-Dives** - Detailed explanations of specific subsystems !!! info "Not Required for Using Pipelex" - You don't need to read this section to use Pipelex effectively. The [Home](../index.md) section covers everything you need to build workflows. + You don't need to read this section to use Pipelex effectively. The [Home](../index.md) section covers everything you need to build methods. --- diff --git a/docs/under-the-hood/reasoning-controls.md b/docs/under-the-hood/reasoning-controls.md index 904970c02..f465f8dd8 100644 --- a/docs/under-the-hood/reasoning-controls.md +++ b/docs/under-the-hood/reasoning-controls.md @@ -33,7 +33,7 @@ deep-analysis = { model = "@default-premium", temperature = 0.1, reasoning_effor ``` ```toml -# In a .plx file +# In a .mthds file [pipe.analyze_contract] type = "PipeLLM" model = "$deep-analysis" @@ -148,7 +148,7 @@ config: theme: base --- flowchart TB - A["LLMSetting
(PLX talent or API)"] -->|make_llm_job_params| B["LLMJobParams
reasoning_effort / reasoning_budget"] + A["LLMSetting
(MTHDS talent or API)"] -->|make_llm_job_params| B["LLMJobParams
reasoning_effort / reasoning_budget"] B --> C{Provider Worker} C -->|OpenAI Completions| D["_resolve_reasoning_effort()
-> effort string"] diff --git a/pipelex/builder/CLAUDE.md b/pipelex/builder/CLAUDE.md index 211b99e5d..b7c115f95 100644 --- a/pipelex/builder/CLAUDE.md +++ b/pipelex/builder/CLAUDE.md @@ -1,11 +1,11 @@ # Builder -Transforms high-level specifications into valid, executable Pipelex pipeline bundles (`.plx` files). The builder is a spec-to-PLX compiler with built-in iterative repair. +Transforms high-level specifications into valid, executable Pipelex pipeline bundles (`.mthds` files). The builder is a spec-to-MTHDS compiler with built-in iterative repair. ## Core Flow ``` -PipelexBundleSpec → to_blueprint() → PipelexBundleBlueprint → PLX file +PipelexBundleSpec → to_blueprint() → PipelexBundleBlueprint → MTHDS file ↑ | | validate_bundle() | | @@ -21,7 +21,7 @@ builder.py # reconstruct_bundle_with_pipe_fixes() helper builder_loop.py # BuilderLoop — the main orchestration class builder_errors.py # Error types exceptions.py # Exception types -conventions.py # File naming defaults (bundle.plx, inputs.json) +conventions.py # File naming defaults (bundle.mthds, inputs.json) bundle_spec.py # PipelexBundleSpec — top-level spec model bundle_header_spec.py # Bundle header info runner_code.py # Code generation utilities @@ -91,9 +91,9 @@ The `build` command in `pipelex/cli/agent_cli/commands/build_cmd.py` calls `buil 1. Runs a "builder pipe" (itself a Pipelex pipeline) that generates a `PipelexBundleSpec` 2. Passes it to `BuilderLoop.build_and_fix()` -3. Converts the result to PLX via `PlxFactory.make_plx_content()` +3. Converts the result to MTHDS via `MthdsFactory.make_plx_content()` 4. Saves to `pipelex-wip/` with incremental naming ## Talent System -Talents are abstract capability labels mapped to concrete model presets. Each talent enum (in `talents/`) maps to a `$preset` code used in PLX files. When modifying talents, update both the enum and its preset mapping dict. +Talents are abstract capability labels mapped to concrete model presets. Each talent enum (in `talents/`) maps to a `$preset` code used in MTHDS files. When modifying talents, update both the enum and its preset mapping dict. 
diff --git a/pipelex/builder/agentic_builder.plx b/pipelex/builder/agentic_builder.mthds similarity index 100% rename from pipelex/builder/agentic_builder.plx rename to pipelex/builder/agentic_builder.mthds diff --git a/pipelex/builder/builder.plx b/pipelex/builder/builder.mthds similarity index 100% rename from pipelex/builder/builder.plx rename to pipelex/builder/builder.mthds diff --git a/pipelex/builder/builder_loop.py b/pipelex/builder/builder_loop.py index afd69a6d6..b7e5d2a3f 100644 --- a/pipelex/builder/builder_loop.py +++ b/pipelex/builder/builder_loop.py @@ -24,7 +24,7 @@ from pipelex.core.pipes.variable_multiplicity import format_concept_with_multiplicity, parse_concept_with_multiplicity from pipelex.graph.graphspec import GraphSpec from pipelex.hub import get_required_pipe -from pipelex.language.plx_factory import PlxFactory +from pipelex.language.mthds_factory import MthdsFactory from pipelex.pipe_controllers.condition.special_outcome import SpecialOutcome from pipelex.pipeline.execute import execute_pipeline from pipelex.pipeline.validate_bundle import ValidateBundleError, validate_bundle @@ -69,15 +69,15 @@ async def build_and_fix( if is_save_first_iteration_enabled: try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + plx_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) first_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_1st_iteration", - extension="plx", + extension="mthds", ) save_text_to_path(text=plx_content, path=str(first_iteration_path), create_directory=True) except PipelexBundleSpecBlueprintError as exc: - log.warning(f"Could not save first iteration PLX: {exc}") + log.warning(f"Could not save first iteration MTHDS: {exc}") max_attempts = get_config().pipelex.builder_config.fix_loop_max_attempts for attempt in range(1, max_attempts + 1): @@ -693,15 +693,15 @@ def _fix_bundle_validation_error( # Save second iteration if we made any changes (pipes or concepts) if (fixed_pipes or added_concepts) and is_save_second_iteration_enabled: try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + plx_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) second_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_2nd_iteration", - extension="plx", + extension="mthds", ) save_text_to_path(text=plx_content, path=str(second_iteration_path)) except PipelexBundleSpecBlueprintError as exc: - log.warning(f"Could not save second iteration PLX: {exc}") + log.warning(f"Could not save second iteration MTHDS: {exc}") return pipelex_bundle_spec diff --git a/pipelex/builder/concept/concept_fixer.plx b/pipelex/builder/concept/concept_fixer.mthds similarity index 100% rename from pipelex/builder/concept/concept_fixer.plx rename to pipelex/builder/concept/concept_fixer.mthds diff --git a/pipelex/builder/conventions.py b/pipelex/builder/conventions.py index 481150108..01ad111fd 100644 --- a/pipelex/builder/conventions.py +++ b/pipelex/builder/conventions.py @@ -4,5 +4,7 @@ and expected by the runner when auto-detecting from a directory. 
""" -DEFAULT_BUNDLE_FILE_NAME = "bundle.plx" +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION + +DEFAULT_BUNDLE_FILE_NAME = f"bundle{MTHDS_EXTENSION}" DEFAULT_INPUTS_FILE_NAME = "inputs.json" diff --git a/pipelex/builder/pipe/pipe_design.plx b/pipelex/builder/pipe/pipe_design.mthds similarity index 100% rename from pipelex/builder/pipe/pipe_design.plx rename to pipelex/builder/pipe/pipe_design.mthds diff --git a/pipelex/builder/synthetic_inputs/synthesize_image.plx b/pipelex/builder/synthetic_inputs/synthesize_image.mthds similarity index 100% rename from pipelex/builder/synthetic_inputs/synthesize_image.plx rename to pipelex/builder/synthetic_inputs/synthesize_image.mthds diff --git a/pipelex/cli/_cli.py b/pipelex/cli/_cli.py index 0a12c394f..22954c482 100644 --- a/pipelex/cli/_cli.py +++ b/pipelex/cli/_cli.py @@ -143,12 +143,12 @@ def doctor_command( app.add_typer( - build_app, name="build", help="Generate AI workflows from natural language requirements: pipelines in .plx format and python code to run them" + build_app, name="build", help="Generate AI methods from natural language requirements: pipelines in .mthds format and python code to run them" ) app.command(name="validate", help="Validate pipes: static validation for syntax and dependencies, dry-run execution for logic and consistency")( validate_cmd ) -app.command(name="run", help="Run a pipe, optionally providing a specific bundle file (.plx)")(run_cmd) +app.command(name="run", help="Run a pipe, optionally providing a specific bundle file (.mthds)")(run_cmd) app.add_typer(graph_app, name="graph", help="Generate and render execution graphs") app.add_typer(show_app, name="show", help="Show configuration, pipes, and list AI models") app.command(name="which", help="Locate where a pipe is defined, similar to 'which' for executables")(which_cmd) diff --git a/pipelex/cli/agent_cli/CLAUDE.md b/pipelex/cli/agent_cli/CLAUDE.md index 9c579f367..f6e0b33ea 100644 --- a/pipelex/cli/agent_cli/CLAUDE.md +++ b/pipelex/cli/agent_cli/CLAUDE.md @@ -1,6 +1,6 @@ # Agent CLI (`pipelex-agent`) -Machine-first CLI for building, running, and validating Pipelex workflow bundles (`.plx` files). All output is structured JSON to stdout (success) or stderr (error). No Rich formatting, no interactive prompts. +Machine-first CLI for building, running, and validating Pipelex method bundles (`.mthds` files). All output is structured JSON to stdout (success) or stderr (error). No Rich formatting, no interactive prompts. 
## Companion: Agent Skills @@ -28,7 +28,7 @@ commands/ inputs_cmd.py # inputs — generate example input JSON concept_cmd.py # concept — JSON spec → concept TOML pipe_cmd.py # pipe — JSON spec → pipe TOML - assemble_cmd.py # assemble — combine TOML parts into .plx + assemble_cmd.py # assemble — combine TOML parts into .mthds graph_cmd.py # graph — render execution graph HTML models_cmd.py # models — list presets, aliases, talent mappings doctor_cmd.py # doctor — config health check @@ -38,14 +38,14 @@ commands/ | Command | Does | |---------|------| -| `build` | Runs BuilderLoop to generate a `.plx` from a natural language prompt | +| `build` | Runs BuilderLoop to generate a `.mthds` from a natural language prompt | | `run` | Executes a pipeline, returns JSON with main_stuff + working_memory | | `validate` | Dry-runs pipes/bundles, returns validation status per pipe | | `inputs` | Generates example input JSON for a given pipe | | `concept` | Converts a JSON concept spec into TOML | | `pipe` | Converts a JSON pipe spec (typed) into TOML | -| `assemble` | Merges concept + pipe TOML sections into a complete `.plx` file | -| `graph` | Generates graph visualization (HTML) from a .plx bundle via dry-run | +| `assemble` | Merges concept + pipe TOML sections into a complete `.mthds` file | +| `graph` | Generates graph visualization (HTML) from a .mthds bundle via dry-run | | `models` | Lists available model presets, aliases, waterfalls, and talent mappings | | `doctor` | Checks config, credentials, models health | diff --git a/pipelex/cli/agent_cli/_agent_cli.py b/pipelex/cli/agent_cli/_agent_cli.py index 239275b84..7380cce41 100644 --- a/pipelex/cli/agent_cli/_agent_cli.py +++ b/pipelex/cli/agent_cli/_agent_cli.py @@ -105,7 +105,7 @@ def run_command( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, inputs: Annotated[ str | None, @@ -125,7 +125,7 @@ def run_command( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Execute a pipeline and output JSON results.""" @@ -153,7 +153,7 @@ def validate_command( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, validate_all: Annotated[ bool, @@ -161,7 +161,7 @@ def validate_command( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Validate a pipe, bundle, or all pipes and output JSON results.""" @@ -186,7 +186,7 @@ def inputs_command( ] = None, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Generate example input JSON for a pipe.""" @@ -231,7 +231,7 @@ def pipe_command( pipe_cmd(pipe_type=pipe_type, spec=spec, spec_file=spec_file) -@app.command(name="assemble", help="Assemble a complete .plx bundle from TOML parts") 
+@app.command(name="assemble", help="Assemble a complete .mthds bundle from TOML parts") def assemble_command( domain: Annotated[ str, @@ -243,7 +243,7 @@ def assemble_command( ], output: Annotated[ str, - typer.Option("--output", "-o", help="Output file path for the assembled bundle (.plx)"), + typer.Option("--output", "-o", help="Output file path for the assembled bundle (.mthds)"), ], description: Annotated[ str | None, @@ -262,7 +262,7 @@ def assemble_command( typer.Option("--pipes", "-p", help="TOML file(s) or inline TOML containing pipe definitions"), ] = None, ) -> None: - """Assemble a complete .plx bundle from individual TOML parts.""" + """Assemble a complete .mthds bundle from individual TOML parts.""" assemble_cmd( domain=domain, main_pipe=main_pipe, @@ -274,11 +274,11 @@ def assemble_command( ) -@app.command(name="graph", help="Generate graph visualization from a .plx bundle") +@app.command(name="graph", help="Generate graph visualization from a .mthds bundle") def graph_command( target: Annotated[ str, - typer.Argument(help="Path to a .plx bundle file"), + typer.Argument(help="Path to a .mthds bundle file"), ], graph_format: Annotated[ GraphFormat, @@ -286,10 +286,10 @@ def graph_command( ] = GraphFormat.REACTFLOW, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: - """Generate graph visualization from a .plx bundle.""" + """Generate graph visualization from a .mthds bundle.""" graph_cmd(target=target, graph_format=graph_format, library_dir=library_dir) diff --git a/pipelex/cli/agent_cli/commands/agent_output.py b/pipelex/cli/agent_cli/commands/agent_output.py index 8ddf5846e..1d17c2182 100644 --- a/pipelex/cli/agent_cli/commands/agent_output.py +++ b/pipelex/cli/agent_cli/commands/agent_output.py @@ -27,8 +27,8 @@ "ArgumentError": "Check command usage with 'pipelex-agent --help'", "JSONDecodeError": "Verify the JSON input is valid (check for trailing commas, unquoted keys, etc.)", # Interpreter errors - "PipelexInterpreterError": "Check PLX file TOML syntax and ensure all referenced concepts and pipes are defined", - "PLXDecodeError": "The PLX file has TOML syntax errors; validate TOML syntax before retrying", + "PipelexInterpreterError": "Check MTHDS file TOML syntax and ensure all referenced concepts and pipes are defined", + "PLXDecodeError": "The MTHDS file has TOML syntax errors; validate TOML syntax before retrying", # Configuration/initialization errors "TelemetryConfigValidationError": "Run 'pipelex init telemetry' to create a valid telemetry configuration", "GatewayTermsNotAcceptedError": "Run 'pipelex init config' to accept gateway terms, or disable pipelex_gateway in backends.toml", @@ -58,7 +58,7 @@ } AGENT_ERROR_DOMAINS: dict[str, str] = { - # input = agent can fix (bad .plx, wrong args, bad JSON) + # input = agent can fix (bad .mthds, wrong args, bad JSON) "ValidateBundleError": "input", "PipeValidationError": "input", "FileNotFoundError": "input", diff --git a/pipelex/cli/agent_cli/commands/assemble_cmd.py b/pipelex/cli/agent_cli/commands/assemble_cmd.py index da7c19257..8adc64297 100644 --- a/pipelex/cli/agent_cli/commands/assemble_cmd.py +++ b/pipelex/cli/agent_cli/commands/assemble_cmd.py @@ -69,7 +69,7 @@ def assemble_cmd( ], output: Annotated[ str, - typer.Option("--output", "-o", help="Output file path for the assembled bundle (.plx)"), + 
typer.Option("--output", "-o", help="Output file path for the assembled bundle (.mthds)"), ], description: Annotated[ str | None, @@ -88,7 +88,7 @@ def assemble_cmd( typer.Option("--pipes", "-p", help="TOML file(s) or inline TOML containing pipe definitions"), ] = None, ) -> None: - """Assemble a complete .plx bundle from individual TOML parts. + """Assemble a complete .mthds bundle from individual TOML parts. Combines domain configuration, concepts, and pipes into a single valid Pipelex bundle file. Each --concepts and --pipes argument can be either @@ -98,11 +98,11 @@ def assemble_cmd( Examples: pipelex-agent assemble --domain my_domain --main-pipe main - --concepts concepts.toml --pipes pipes.toml --output bundle.plx + --concepts concepts.toml --pipes pipes.toml --output bundle.mthds pipelex-agent assemble --domain my_domain --main-pipe main --concepts '[concept.MyInput]' --pipes '[pipe.main]' - --output bundle.plx + --output bundle.mthds """ try: # Create base document with domain header diff --git a/pipelex/cli/agent_cli/commands/build_cmd.py b/pipelex/cli/agent_cli/commands/build_cmd.py index 55ba93d56..c1aea35b7 100644 --- a/pipelex/cli/agent_cli/commands/build_cmd.py +++ b/pipelex/cli/agent_cli/commands/build_cmd.py @@ -40,7 +40,7 @@ def build_cmd( """Build a pipeline from a prompt and output JSON with paths. Outputs to pipelex-wip/ directory with incremental naming (pipeline_01, pipeline_02, etc.). - Generates PLX bundle only (no inputs.json or runner.py). + Generates MTHDS bundle only (no inputs.json or runner.py). Outputs JSON to stdout on success, JSON to stderr on error with exit code 1. """ diff --git a/pipelex/cli/agent_cli/commands/build_core.py b/pipelex/cli/agent_cli/commands/build_core.py index 0330ef4af..707e5b078 100644 --- a/pipelex/cli/agent_cli/commands/build_core.py +++ b/pipelex/cli/agent_cli/commands/build_core.py @@ -11,8 +11,9 @@ from pipelex.builder.conventions import DEFAULT_INPUTS_FILE_NAME from pipelex.builder.exceptions import PipelexBundleSpecBlueprintError from pipelex.config import get_config +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION from pipelex.hub import get_required_pipe -from pipelex.language.plx_factory import PlxFactory +from pipelex.language.mthds_factory import MthdsFactory from pipelex.system.configuration.configs import PipelineExecutionConfig from pipelex.tools.misc.file_utils import ( ensure_directory_for_file_path, @@ -27,7 +28,7 @@ class BuildPipeResult(BaseModel): """Result of building a pipe, containing output paths and metadata.""" output_dir: Path - plx_file: Path + mthds_file: Path inputs_file: Path | None = None main_pipe_code: str domain: str @@ -44,7 +45,7 @@ def to_agent_json(self) -> dict[str, Any]: """ result: dict[str, Any] = { "output_dir": str(self.output_dir), - "plx_file": str(self.plx_file), + "mthds_file": str(self.mthds_file), "main_pipe_code": self.main_pipe_code, "domain": self.domain, } @@ -128,24 +129,24 @@ async def build_pipe_core( # Determine base output directory base_dir = output_dir or builder_config.default_output_dir - # Determine output path - always generate directory with bundle.plx + # Determine output path - always generate directory with bundle.mthds dir_name = output_name or builder_config.default_directory_base_name - bundle_file_name = Path(f"{builder_config.default_bundle_file_name}.plx") + bundle_file_name = Path(f"{builder_config.default_bundle_file_name}{MTHDS_EXTENSION}") extras_output_dir = get_incremental_directory_path( base_path=base_dir, base_name=dir_name, ) - 
plx_file_path = Path(extras_output_dir) / bundle_file_name + mthds_file_path = Path(extras_output_dir) / bundle_file_name - # Save the PLX file - ensure_directory_for_file_path(file_path=str(plx_file_path)) + # Save the MTHDS file + ensure_directory_for_file_path(file_path=str(mthds_file_path)) try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) except PipelexBundleSpecBlueprintError as exc: msg = f"Failed to convert bundle spec to blueprint: {exc}" raise BuildPipeError(message=msg) from exc - save_text_to_path(text=plx_content, path=str(plx_file_path)) + save_text_to_path(text=mthds_content, path=str(mthds_file_path)) main_pipe_code = pipelex_bundle_spec.main_pipe or "" domain = pipelex_bundle_spec.domain or "" @@ -170,7 +171,7 @@ async def build_pipe_core( return BuildPipeResult( output_dir=Path(extras_output_dir), - plx_file=plx_file_path, + mthds_file=mthds_file_path, inputs_file=inputs_file_path, main_pipe_code=main_pipe_code, domain=domain, diff --git a/pipelex/cli/agent_cli/commands/graph_cmd.py b/pipelex/cli/agent_cli/commands/graph_cmd.py index 32ce593a2..4eb342447 100644 --- a/pipelex/cli/agent_cli/commands/graph_cmd.py +++ b/pipelex/cli/agent_cli/commands/graph_cmd.py @@ -1,4 +1,4 @@ -"""Agent CLI graph command - generate graph HTML from a .plx bundle via dry-run.""" +"""Agent CLI graph command - generate graph HTML from a .mthds bundle via dry-run.""" import asyncio from pathlib import Path @@ -33,7 +33,7 @@ class GraphFormat(StrEnum): def graph_cmd( target: Annotated[ str, - typer.Argument(help="Path to a .plx bundle file"), + typer.Argument(help="Path to a .mthds bundle file"), ], graph_format: Annotated[ GraphFormat, @@ -41,10 +41,10 @@ def graph_cmd( ] = GraphFormat.REACTFLOW, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: - """Generate graph visualization from a .plx bundle. + """Generate graph visualization from a .mthds bundle. Performs a dry-run of the pipeline with mock inputs to produce the execution graph, then renders it as HTML. @@ -52,9 +52,9 @@ def graph_cmd( Outputs JSON to stdout on success, JSON to stderr on error with exit code 1. Examples: - pipelex-agent graph bundle.plx - pipelex-agent graph bundle.plx --format mermaidflow - pipelex-agent graph bundle.plx -L ./my_pipes/ + pipelex-agent graph bundle.mthds + pipelex-agent graph bundle.mthds --format mermaidflow + pipelex-agent graph bundle.mthds -L ./my_pipes/ """ input_path = Path(target) @@ -62,7 +62,7 @@ def graph_cmd( agent_error(f"File not found: {target}", "FileNotFoundError") if not is_pipelex_file(input_path): - agent_error(f"Expected a .plx bundle file, got: {input_path.name}", "ArgumentError") + agent_error(f"Expected a .mthds bundle file, got: {input_path.name}", "ArgumentError") # Read PLX content and extract main pipe try: diff --git a/pipelex/cli/agent_cli/commands/inputs_cmd.py b/pipelex/cli/agent_cli/commands/inputs_cmd.py index ea4cdc4b7..992846a57 100644 --- a/pipelex/cli/agent_cli/commands/inputs_cmd.py +++ b/pipelex/cli/agent_cli/commands/inputs_cmd.py @@ -33,7 +33,7 @@ async def _inputs_core( Args: pipe_code: The pipe code to generate inputs for. - bundle_path: Path to the bundle file (.plx). 
+ bundle_path: Path to the bundle file (.mthds). library_dirs: List of library directories to search for pipe definitions. Returns: @@ -87,7 +87,7 @@ def inputs_cmd( ] = None, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Generate example input JSON for a pipe and output JSON results. @@ -96,8 +96,8 @@ def inputs_cmd( Examples: pipelex-agent inputs my_pipe - pipelex-agent inputs my_bundle.plx - pipelex-agent inputs my_bundle.plx --pipe my_pipe + pipelex-agent inputs my_bundle.mthds + pipelex-agent inputs my_bundle.mthds --pipe my_pipe pipelex-agent inputs my_pipe -L ./my_pipes """ # Validate that at least one target is provided @@ -112,7 +112,7 @@ def inputs_cmd( target_path = Path(target) if target_path.is_dir(): agent_error( - f"'{target}' is a directory. The inputs command requires a .plx file or a pipe code.", + f"'{target}' is a directory. The inputs command requires a .mthds file or a pipe code.", "ArgumentError", ) diff --git a/pipelex/cli/agent_cli/commands/run_cmd.py b/pipelex/cli/agent_cli/commands/run_cmd.py index 36f46147a..1abafd7e1 100644 --- a/pipelex/cli/agent_cli/commands/run_cmd.py +++ b/pipelex/cli/agent_cli/commands/run_cmd.py @@ -138,7 +138,7 @@ def run_cmd( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, inputs: Annotated[ str | None, @@ -158,7 +158,7 @@ def run_cmd( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Execute a pipeline and output JSON results. @@ -167,9 +167,9 @@ def run_cmd( Examples: pipelex-agent run my_pipe --inputs data.json - pipelex-agent run my_bundle.plx --pipe my_pipe + pipelex-agent run my_bundle.mthds --pipe my_pipe pipelex-agent run my_pipe --dry-run --mock-inputs - pipelex-agent run my_bundle.plx --graph + pipelex-agent run my_bundle.mthds --graph """ # Validate that at least one target is provided provided_options = sum([target is not None, pipe is not None, bundle is not None]) diff --git a/pipelex/cli/agent_cli/commands/validate_cmd.py b/pipelex/cli/agent_cli/commands/validate_cmd.py index 07064b3e4..120a76a51 100644 --- a/pipelex/cli/agent_cli/commands/validate_cmd.py +++ b/pipelex/cli/agent_cli/commands/validate_cmd.py @@ -170,7 +170,7 @@ def validate_cmd( ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx)"), + typer.Option("--bundle", help="Bundle file path (.mthds)"), ] = None, validate_all: Annotated[ bool, @@ -178,7 +178,7 @@ def validate_cmd( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files)"), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files)"), ] = None, ) -> None: """Validate a pipe, bundle, or all pipes and output JSON results. 
@@ -187,7 +187,7 @@ def validate_cmd( Examples: pipelex-agent validate my_pipe - pipelex-agent validate my_bundle.plx + pipelex-agent validate my_bundle.mthds pipelex-agent validate --all -L ./my_pipes """ library_dirs = [Path(lib_dir) for lib_dir in library_dir] if library_dir else None diff --git a/pipelex/cli/commands/build/app.py b/pipelex/cli/commands/build/app.py index a135cdaf0..8aa94dc88 100644 --- a/pipelex/cli/commands/build/app.py +++ b/pipelex/cli/commands/build/app.py @@ -13,4 +13,4 @@ build_app.command("output", help="Generate example output representation for a pipe (JSON, Python, or TypeScript)")(generate_output_cmd) build_app.command("pipe", help="Build a Pipelex bundle with one validation/fix loop correcting deterministic issues")(build_pipe_cmd) build_app.command("runner", help="Build the Python code to run a pipe with the necessary inputs")(prepare_runner_cmd) -build_app.command("structures", help="Generate Python structure files from concept definitions in PLX files")(build_structures_command) +build_app.command("structures", help="Generate Python structure files from concept definitions in MTHDS files")(build_structures_command) diff --git a/pipelex/cli/commands/build/inputs_cmd.py b/pipelex/cli/commands/build/inputs_cmd.py index af3d1bbc5..b23ed22d9 100644 --- a/pipelex/cli/commands/build/inputs_cmd.py +++ b/pipelex/cli/commands/build/inputs_cmd.py @@ -41,7 +41,7 @@ async def _generate_inputs_core( Args: pipe_code: The pipe code to generate inputs for. - bundle_path: Path to the bundle file (.plx). + bundle_path: Path to the bundle file (.mthds). output_path: Path to save the generated JSON file. """ if bundle_path: @@ -100,7 +100,7 @@ async def _generate_inputs_core( if output_path: final_output_path = output_path elif bundle_path: - # Place inputs.json in the same directory as the PLX file + # Place inputs.json in the same directory as the MTHDS file bundle_dir = bundle_path.parent final_output_path = bundle_dir / DEFAULT_INPUTS_FILE_NAME else: @@ -123,14 +123,14 @@ def generate_inputs_cmd( ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, library_dir: Annotated[ list[str] | None, typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, output_path: Annotated[ @@ -147,8 +147,8 @@ def generate_inputs_cmd( Examples: pipelex build inputs my_pipe - pipelex build inputs my_bundle.plx - pipelex build inputs my_bundle.plx --pipe my_pipe + pipelex build inputs my_bundle.mthds + pipelex build inputs my_bundle.mthds --pipe my_pipe pipelex build inputs my_pipe --output custom_inputs.json pipelex build inputs my_pipe -L ./my_pipes """ @@ -167,7 +167,7 @@ def generate_inputs_cmd( target_path = Path(target) if target_path.is_dir(): typer.secho( - f"Failed to run: '{target}' is a directory. The inputs command requires a .plx file or a pipe code.", + f"Failed to run: '{target}' is a directory. 
The inputs command requires a .mthds file or a pipe code.", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/build/output_cmd.py b/pipelex/cli/commands/build/output_cmd.py index c54597ee2..d4c6abf98 100644 --- a/pipelex/cli/commands/build/output_cmd.py +++ b/pipelex/cli/commands/build/output_cmd.py @@ -42,7 +42,7 @@ async def _generate_output_core( Args: pipe_code: The pipe code to generate output for. - bundle_path: Path to the bundle file (.plx). + bundle_path: Path to the bundle file (.mthds). output_path: Path to save the generated file. output_format: The format to generate (JSON, PYTHON, or SCHEMA). """ @@ -102,7 +102,7 @@ async def _generate_output_core( if output_path: final_output_path = output_path elif bundle_path: - # Place output file in the same directory as the PLX file + # Place output file in the same directory as the MTHDS file bundle_dir = Path(bundle_path).parent match output_format: case ConceptRepresentationFormat.JSON: @@ -137,14 +137,14 @@ def generate_output_cmd( ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, library_dir: Annotated[ list[str] | None, typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, output_path: Annotated[ @@ -183,9 +183,9 @@ def generate_output_cmd( pipelex build output my_pipe --format schema - pipelex build output my_bundle.plx + pipelex build output my_bundle.mthds - pipelex build output my_bundle.plx --pipe my_pipe + pipelex build output my_bundle.mthds --pipe my_pipe pipelex build output my_pipe --output custom_output.json @@ -222,7 +222,7 @@ def generate_output_cmd( target_path = Path(target) if target_path.is_dir(): typer.secho( - f"Failed to run: '{target}' is a directory. The output command requires a .plx file or a pipe code.", + f"Failed to run: '{target}' is a directory. 
The output command requires a .mthds file or a pipe code.", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/build/pipe_cmd.py b/pipelex/cli/commands/build/pipe_cmd.py index c0be5206a..9168dd7f4 100644 --- a/pipelex/cli/commands/build/pipe_cmd.py +++ b/pipelex/cli/commands/build/pipe_cmd.py @@ -22,11 +22,12 @@ handle_model_choice_error, ) from pipelex.config import get_config +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError from pipelex.core.pipes.variable_multiplicity import parse_concept_with_multiplicity from pipelex.graph.graph_factory import generate_graph_outputs, save_graph_outputs_to_dir from pipelex.hub import get_console, get_report_delegate, get_required_pipe, get_telemetry_manager -from pipelex.language.plx_factory import PlxFactory +from pipelex.language.mthds_factory import MthdsFactory from pipelex.pipe_operators.exceptions import PipeOperatorModelAvailabilityError from pipelex.pipe_run.pipe_run_mode import PipeRunMode from pipelex.pipelex import PACKAGE_VERSION, Pipelex @@ -99,7 +100,7 @@ def build_pipe_cmd( ] = False, no_extras: Annotated[ bool, - typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the PLX file"), + typer.Option("--no-extras", help="Skip generating inputs.json and runner.py, only generate the MTHDS file"), ] = False, bundle_view: Annotated[ bool, @@ -168,41 +169,41 @@ async def run_pipeline(): base_dir = output_dir or builder_config.default_output_dir # Determine output path and whether to generate extras - bundle_file_name = Path(f"{builder_config.default_bundle_file_name}.plx") + bundle_file_name = Path(f"{builder_config.default_bundle_file_name}{MTHDS_EXTENSION}") if no_extras: - # Generate single file: {base_dir}/{name}_01.plx + # Generate single file: {base_dir}/{name}_01.mthds name = output_name or builder_config.default_bundle_file_name - plx_file_path = get_incremental_file_path( + mthds_file_path = get_incremental_file_path( base_path=base_dir, base_name=name, - extension="plx", + extension="mthds", ) extras_output_dir = "" # Not used in no_extras mode else: - # Generate directory with extras: {base_dir}/{name}_01/bundle.plx + extras + # Generate directory with extras: {base_dir}/{name}_01/bundle.mthds + extras dir_name = output_name or builder_config.default_directory_base_name extras_output_dir = get_incremental_directory_path( base_path=base_dir, base_name=dir_name, ) - plx_file_path = Path(extras_output_dir) / bundle_file_name + mthds_file_path = Path(extras_output_dir) / bundle_file_name - # Save the PLX file - ensure_directory_for_file_path(file_path=str(plx_file_path)) + # Save the MTHDS file + ensure_directory_for_file_path(file_path=str(mthds_file_path)) try: - plx_content = PlxFactory.make_plx_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) except PipelexBundleSpecBlueprintError as exc: typer.secho(f"❌ Failed to convert bundle spec to blueprint: {exc}", fg=typer.colors.RED) raise typer.Exit(1) from exc - save_text_to_path(text=plx_content, path=str(plx_file_path)) - log.verbose(f"Pipelex bundle saved to: {plx_file_path}") + save_text_to_path(text=mthds_content, path=str(mthds_file_path)) + log.verbose(f"Pipelex bundle saved to: {mthds_file_path}") if no_extras: end_time = time.time() console = get_console() console.print(f"\n[green]✓[/green] [bold]Pipeline built successfully ({end_time - 
start_time:.1f}s)[/bold]") - console.print(f" Output: {plx_file_path}") + console.print(f" Output: {mthds_file_path}") return # Generate extras (inputs and runner) @@ -294,7 +295,7 @@ async def run_pipeline(): # pass empty library_dirs to avoid loading any libraries set at env var or instance level: # we don't want any other pipeline to interfere with the pipeline we just built built_pipe_output = await execute_pipeline( - plx_content=plx_content, + plx_content=mthds_content, pipe_run_mode=PipeRunMode.DRY, execution_config=built_pipe_execution_config, library_dirs=[], @@ -319,7 +320,7 @@ async def run_pipeline(): console = get_console() console.print(f"\n[green]✓[/green] [bold]Pipeline built successfully ({end_time - start_time:.1f}s)[/bold]") console.print(f" Output saved to [bold magenta]{extras_output_dir}[/bold magenta]:") - console.print(f" [green]✓[/green] bundle.plx → {domain_code} → main pipe [red]{main_pipe_code}[/red]") + console.print(f" [green]✓[/green] bundle.mthds → {domain_code} → main pipe [red]{main_pipe_code}[/red]") if saved_bundle_view_formats: console.print(f" [green]✓[/green] bundle_view: {', '.join(saved_bundle_view_formats)}") if saved_structure_names: diff --git a/pipelex/cli/commands/build/runner_cmd.py b/pipelex/cli/commands/build/runner_cmd.py index 7d52bb3ae..3537e7409 100644 --- a/pipelex/cli/commands/build/runner_cmd.py +++ b/pipelex/cli/commands/build/runner_cmd.py @@ -88,7 +88,7 @@ async def prepare_runner( if output_path: final_output_path = output_path else: - # Place runner in the same directory as the PLX file + # Place runner in the same directory as the MTHDS file bundle_dir = Path(bundle_path).parent final_output_path = bundle_dir / f"run_{pipe_code}.py" output_dir = Path(final_output_path).parent @@ -161,11 +161,11 @@ async def prepare_runner( def prepare_runner_cmd( target: Annotated[ str | None, - typer.Argument(help="Bundle file path (.plx)"), + typer.Argument(help="Bundle file path (.mthds)"), ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code to use (optional if the .plx declares a main_pipe)"), + typer.Option("--pipe", help="Pipe code to use (optional if the .mthds declares a main_pipe)"), ] = None, output_path: Annotated[ str | None, @@ -173,7 +173,7 @@ def prepare_runner_cmd( ] = None, library_dirs: Annotated[ list[str] | None, - typer.Option("--library-dirs", "-L", help="Directories to search for pipe definitions (.plx files). Can be specified multiple times."), + typer.Option("--library-dirs", "-L", help="Directories to search for pipe definitions (.mthds files). Can be specified multiple times."), ] = None, ) -> None: """Prepare a Python runner file for a pipe. @@ -186,9 +186,9 @@ def prepare_runner_cmd( Custom concept types will have their structure recursively generated. 
Examples: - pipelex build runner my_bundle.plx - pipelex build runner my_bundle.plx --pipe my_pipe - pipelex build runner my_bundle.plx --output runner.py + pipelex build runner my_bundle.mthds + pipelex build runner my_bundle.mthds --pipe my_pipe + pipelex build runner my_bundle.mthds --output runner.py """ # Show help if no target provided if target is None: @@ -201,10 +201,10 @@ def prepare_runner_cmd( output_path_path = Path(output_path) if output_path else None library_dirs_paths = [Path(lib_dir) for lib_dir in library_dirs] if library_dirs else None - # Validate: target must be a .plx file + # Validate: target must be a .mthds file if not is_pipelex_file(target_path): typer.secho( - f"Failed to run: '{target}' is not a .plx file.", + f"Failed to run: '{target}' is not a .mthds file.", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/build/structures_cmd.py b/pipelex/cli/commands/build/structures_cmd.py index 46692cc83..77ec06f55 100644 --- a/pipelex/cli/commands/build/structures_cmd.py +++ b/pipelex/cli/commands/build/structures_cmd.py @@ -293,7 +293,7 @@ def generate_structures_from_blueprints( def build_structures_command( target: Annotated[ str, - typer.Argument(help="Target directory to scan for .plx files, or a specific .plx file"), + typer.Argument(help="Target directory to scan for .mthds files, or a specific .mthds file"), ], output_dir: Annotated[ str | None, @@ -304,7 +304,7 @@ def build_structures_command( typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, force: Annotated[ @@ -316,14 +316,14 @@ def build_structures_command( ), ] = False, ) -> None: - """Generate Python structure classes from concept definitions in .plx files. + """Generate Python structure classes from concept definitions in .mthds files. 
Examples: - pipelex build structures my_bundle.plx + pipelex build structures my_bundle.mthds pipelex build structures ./my_pipes/ - pipelex build structures my_bundle.plx -o ./generated/ - pipelex build structures my_bundle.plx -L ./shared_pipes/ - pipelex build structures my_bundle.plx --force + pipelex build structures my_bundle.mthds -o ./generated/ + pipelex build structures my_bundle.mthds -L ./shared_pipes/ + pipelex build structures my_bundle.mthds --force """ def _build_structures_cmd(): @@ -337,12 +337,12 @@ def _build_structures_cmd(): library_dirs_paths, _ = resolve_library_dirs(library_dir) # Determine if target is a file or directory - is_plx_file = target_path.is_file() and is_pipelex_file(target_path) + is_mthds_file = target_path.is_file() and is_pipelex_file(target_path) pipelex_instance = make_pipelex_for_cli(context=ErrorContext.BUILD, library_dirs=library_dir) try: - if is_plx_file: - # Single PLX file: output to parent directory + if is_mthds_file: + # Single MTHDS file: output to parent directory base_dir = target_path.parent output_directory = Path(output_dir) if output_dir else base_dir / "structures" @@ -367,9 +367,9 @@ def _build_structures_cmd(): skip_existing_check=force, ) else: - # Directory: scan for all PLX files + # Directory: scan for all MTHDS files if not target_path.is_dir(): - typer.secho(f"❌ Target is not a directory or .plx file: {target_path}", fg=typer.colors.RED, err=True) + typer.secho(f"❌ Target is not a directory or .mthds file: {target_path}", fg=typer.colors.RED, err=True) raise typer.Exit(1) output_directory = Path(output_dir) if output_dir else target_path / "structures" diff --git a/pipelex/cli/commands/run_cmd.py b/pipelex/cli/commands/run_cmd.py index 751c4c28e..719bbe922 100644 --- a/pipelex/cli/commands/run_cmd.py +++ b/pipelex/cli/commands/run_cmd.py @@ -20,7 +20,7 @@ ) from pipelex.config import get_config from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError -from pipelex.core.interpreter.helpers import is_pipelex_file +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION, is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError from pipelex.core.stuffs.stuff_viewer import render_stuff_viewer @@ -43,15 +43,15 @@ def run_cmd( target: Annotated[ str | None, - typer.Argument(help="Pipe code, bundle file path (.plx), or pipeline directory (auto-detected)"), + typer.Argument(help="Pipe code, bundle file path (.mthds), or pipeline directory (auto-detected)"), ] = None, pipe: Annotated[ str | None, - typer.Option("--pipe", help="Pipe code to run, can be omitted if you specify a bundle (.plx) that declares a main pipe"), + typer.Option("--pipe", help="Pipe code to run, can be omitted if you specify a bundle (.mthds) that declares a main pipe"), ] = None, bundle: Annotated[ str | None, - typer.Option("--bundle", help="Bundle file path (.plx) - runs its main_pipe unless you specify a pipe code"), + typer.Option("--bundle", help="Bundle file path (.mthds) - runs its main_pipe unless you specify a pipe code"), ] = None, inputs: Annotated[ str | None, @@ -101,20 +101,20 @@ def run_cmd( ] = False, library_dir: Annotated[ list[str] | None, - typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.plx files). Can be specified multiple times."), + typer.Option("--library-dir", "-L", help="Directory to search for pipe definitions (.mthds files). 
Can be specified multiple times."), ] = None, ) -> None: """Execute a pipeline from a specific bundle file (or not), specifying its pipe code or not. If the bundle is provided, it will run its main pipe unless you specify a pipe code. If the pipe code is provided, you don't need to provide a bundle file if it's already part of the imported packages. - If a directory is provided, it auto-detects bundle.plx and inputs.json inside it. + If a directory is provided, it auto-detects bundle.mthds and inputs.json inside it. Examples: pipelex run my_pipe - pipelex run --bundle my_bundle.plx - pipelex run --bundle my_bundle.plx --pipe my_pipe + pipelex run --bundle my_bundle.mthds + pipelex run --bundle my_bundle.mthds --pipe my_pipe pipelex run --pipe my_pipe --inputs data.json - pipelex run my_bundle.plx --inputs data.json + pipelex run my_bundle.mthds --inputs data.json pipelex run pipeline_01/ pipelex run pipeline_01/ --pipe my_pipe pipelex run my_pipe --working-memory-path results.json --no-pretty-print @@ -158,30 +158,30 @@ def run_cmd( ) raise typer.Exit(1) - # Find .plx: try default name first, then fall back to single .plx + # Find .mthds: try default name first, then fall back to single .mthds bundle_file = target_path / DEFAULT_BUNDLE_FILE_NAME if bundle_file.is_file(): bundle_path = str(bundle_file) else: - plx_files = list(target_path.glob("*.plx")) - if len(plx_files) == 0: + mthds_files = list(target_path.glob(f"*{MTHDS_EXTENSION}")) + if len(mthds_files) == 0: typer.secho( - f"Failed to run: no .plx bundle file found in directory '{target}'", + f"Failed to run: no .mthds bundle file found in directory '{target}'", fg=typer.colors.RED, err=True, ) raise typer.Exit(1) - if len(plx_files) > 1: - plx_names = ", ".join(plx_file.name for plx_file in plx_files) + if len(mthds_files) > 1: + mthds_names = ", ".join(mthds_file.name for mthds_file in mthds_files) typer.secho( - f"Failed to run: multiple .plx files found in '{target}' ({plx_names}) " + f"Failed to run: multiple .mthds files found in '{target}' ({mthds_names}) " f"and no '{DEFAULT_BUNDLE_FILE_NAME}'. " - f"Pass the .plx file directly, e.g.: pipelex run {target_path / plx_files[0].name}", + f"Pass the .mthds file directly, e.g.: pipelex run {target_path / mthds_files[0].name}", fg=typer.colors.RED, err=True, ) raise typer.Exit(1) - bundle_path = str(plx_files[0]) + bundle_path = str(mthds_files[0]) # Auto-detect inputs if --inputs not explicitly provided inputs_file = target_path / DEFAULT_INPUTS_FILE_NAME @@ -207,7 +207,7 @@ def run_cmd( bundle_path = target if bundle: typer.secho( - "Failed to run: cannot use option --bundle if you're already passing a bundle file (.plx) as positional argument", + "Failed to run: cannot use option --bundle if you're already passing a bundle file (.mthds) as positional argument", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/commands/show_cmd.py b/pipelex/cli/commands/show_cmd.py index 750f5cb1a..6060e6034 100644 --- a/pipelex/cli/commands/show_cmd.py +++ b/pipelex/cli/commands/show_cmd.py @@ -201,7 +201,7 @@ def show_pipe_cmd( typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). 
Can be specified multiple times.", ), ] = None, ) -> None: diff --git a/pipelex/cli/commands/validate_cmd.py b/pipelex/cli/commands/validate_cmd.py index f7701216e..263c2813b 100644 --- a/pipelex/cli/commands/validate_cmd.py +++ b/pipelex/cli/commands/validate_cmd.py @@ -77,7 +77,7 @@ def do_validate_all_libraries_and_dry_run( def validate_cmd( target: Annotated[ str | None, - typer.Argument(help="Pipe code or bundle file path (auto-detected based on .plx extension)"), + typer.Argument(help="Pipe code or bundle file path (auto-detected based on .mthds extension)"), ] = None, pipe: Annotated[ str | None, @@ -87,7 +87,7 @@ def validate_cmd( str | None, typer.Option( "--bundle", - help="Bundle file path (.plx) - validates all pipes in the bundle", + help="Bundle file path (.mthds) - validates all pipes in the bundle", ), ] = None, validate_all: Annotated[ @@ -99,7 +99,7 @@ def validate_cmd( typer.Option( "--library-dir", "-L", - help="Directory to search for pipe definitions (.plx files). Can be specified multiple times.", + help="Directory to search for pipe definitions (.mthds files). Can be specified multiple times.", ), ] = None, ) -> None: @@ -107,9 +107,9 @@ def validate_cmd( Examples: pipelex validate my_pipe - pipelex validate my_bundle.plx - pipelex validate --bundle my_bundle.plx - pipelex validate --bundle my_bundle.plx --pipe my_pipe + pipelex validate my_bundle.mthds + pipelex validate --bundle my_bundle.mthds + pipelex validate --bundle my_bundle.mthds --pipe my_pipe pipelex validate --all """ if validate_all: @@ -149,7 +149,7 @@ def validate_cmd( bundle_path = target_path if bundle: typer.secho( - "Failed to validate: cannot use option --bundle if you're already passing a bundle file (.plx) as positional argument", + "Failed to validate: cannot use option --bundle if you're already passing a bundle file (.mthds) as positional argument", fg=typer.colors.RED, err=True, ) diff --git a/pipelex/cli/error_handlers.py b/pipelex/cli/error_handlers.py index 82cb8961b..cc8cf3787 100644 --- a/pipelex/cli/error_handlers.py +++ b/pipelex/cli/error_handlers.py @@ -235,7 +235,7 @@ def handle_build_validation_failure(exc: ValidateBundleError) -> NoReturn: # Display build-specific tips console.print( "[bold green]💡 Tip:[/bold green] Try rephrasing your prompt or simplifying the pipeline requirements. " - "Breaking complex workflows into smaller steps can also help." + "Breaking complex methods into smaller steps can also help." ) console.print(f"[dim]Learn more: {URLs.documentation}[/dim]") console.print(f"[dim]Join our Discord for help: {URLs.discord}[/dim]\n") diff --git a/pipelex/core/interpreter/helpers.py b/pipelex/core/interpreter/helpers.py index 517994258..3c50de101 100644 --- a/pipelex/core/interpreter/helpers.py +++ b/pipelex/core/interpreter/helpers.py @@ -2,17 +2,19 @@ from pipelex.types import StrEnum +MTHDS_EXTENSION = ".mthds" + def is_pipelex_file(file_path: Path) -> bool: - """Check if a file is a Pipelex PLX file based on its extension. + """Check if a file is a Pipelex MTHDS file based on its extension. 
Args: file_path: Path to the file to check Returns: - True if the file has .plx extension, False otherwise + True if the file has .mthds extension, False otherwise """ - return file_path.suffix == ".plx" + return file_path.suffix == MTHDS_EXTENSION class ValidationErrorScope(StrEnum): diff --git a/pipelex/hub.py b/pipelex/hub.py index 5349111b1..7ada0b9c0 100644 --- a/pipelex/hub.py +++ b/pipelex/hub.py @@ -523,7 +523,7 @@ def get_pipe_source(pipe_code: str) -> Path | None: pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. """ return get_pipelex_hub().get_library_manager().get_pipe_source(pipe_code=pipe_code) diff --git a/pipelex/language/mthds_config.py b/pipelex/language/mthds_config.py new file mode 100644 index 000000000..353009ad8 --- /dev/null +++ b/pipelex/language/mthds_config.py @@ -0,0 +1,28 @@ +from pipelex.system.configuration.config_model import ConfigModel + + +class MthdsConfigStrings(ConfigModel): + prefer_literal: bool + force_multiline: bool + length_limit_to_multiline: int + ensure_trailing_newline: bool + ensure_leading_blank_line: bool + + +class MthdsConfigInlineTables(ConfigModel): + spaces_inside_curly_braces: bool + + +class MthdsConfigForConcepts(ConfigModel): + structure_field_ordering: list[str] + + +class MthdsConfigForPipes(ConfigModel): + field_ordering: list[str] + + +class MthdsConfig(ConfigModel): + strings: MthdsConfigStrings + inline_tables: MthdsConfigInlineTables + concepts: MthdsConfigForConcepts + pipes: MthdsConfigForPipes diff --git a/pipelex/language/plx_factory.py b/pipelex/language/mthds_factory.py similarity index 95% rename from pipelex/language/plx_factory.py rename to pipelex/language/mthds_factory.py index ecc480091..6d84862aa 100644 --- a/pipelex/language/plx_factory.py +++ b/pipelex/language/mthds_factory.py @@ -13,7 +13,7 @@ if TYPE_CHECKING: from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint - from pipelex.system.configuration.configs import PlxConfig + from pipelex.system.configuration.configs import MthdsConfig class SectionKey(StrEnum): @@ -27,10 +27,10 @@ class SectionKey(StrEnum): PIPE_CATEGORY_FIELD_KEY = "pipe_category" -class PlxFactory: +class MthdsFactory: @classmethod - def _plx_config(cls) -> PlxConfig: - return get_config().pipelex.plx_config + def _mthds_config(cls) -> MthdsConfig: + return get_config().pipelex.mthds_config @classmethod def format_tomlkit_string(cls, text: str) -> Any: # Can't type this because of tomlkit @@ -39,7 +39,7 @@ def format_tomlkit_string(cls, text: str) -> Any: # Can't type this because of - When multiline, `ensure_trailing_newline` puts the closing quotes on their own line. - When multiline, `ensure_leading_blank_line` inserts a real blank line at the start of the string. 
""" - strings_config = cls._plx_config().strings + strings_config = cls._mthds_config().strings needs_multiline = strings_config.force_multiline or ("\n" in text) or len(text) > strings_config.length_limit_to_multiline normalized = text @@ -144,7 +144,7 @@ def convert_mapping_to_table( else: # No field ordering provided, use original logic for field_key, field_value in mapping.items(): - # Skip the category field as it's not needed in PLX output (pipe metadata) + # Skip the category field as it's not needed in MTHDS output (pipe metadata) if field_key == PIPE_CATEGORY_FIELD_KEY: continue @@ -241,9 +241,9 @@ def make_template_table(cls, template_value: Mapping[str, Any]) -> Any: @classmethod def make_construct_table(cls, construct_value: Mapping[str, Any]) -> Any: - """Create a nested table for construct section in PLX format. + """Create a nested table for construct section in MTHDS format. - The construct_value should already be in PLX format (from ConstructBlueprint.to_plx_dict()) + The construct_value should already be in MTHDS format (from ConstructBlueprint.to_plx_dict()) with field names at the root, not wrapped in a 'fields' key. """ tbl = table() @@ -265,7 +265,7 @@ def make_table_obj_for_pipe(cls, section_value: Mapping[str, Any]) -> Any: log.verbose(f"Field is a mapping: key = {field_key}, value = {field_value}") field_value = cast("Mapping[str, Any]", field_value) # Convert pipe configuration to table (handles template field specially) - table_obj.add(field_key, cls.convert_mapping_to_table(field_value, field_ordering=cls._plx_config().pipes.field_ordering)) + table_obj.add(field_key, cls.convert_mapping_to_table(field_value, field_ordering=cls._mthds_config().pipes.field_ordering)) return table_obj @classmethod @@ -314,7 +314,7 @@ def make_table_obj_for_concept(cls, section_value: Mapping[str, Any]) -> Any: structure_table_obj.add( structure_field_key, cls.convert_dicts_to_inline_tables( - value=filtered_value, field_ordering=cls._plx_config().concepts.structure_field_ordering + value=filtered_value, field_ordering=cls._mthds_config().concepts.structure_field_ordering ), ) concept_table_obj.add("structure", structure_table_obj) @@ -326,7 +326,7 @@ def make_table_obj_for_concept(cls, section_value: Mapping[str, Any]) -> Any: return table_obj @classmethod - def dict_to_plx_styled_toml(cls, data: Mapping[str, Any]) -> str: + def dict_to_mthds_styled_toml(cls, data: Mapping[str, Any]) -> str: """Top-level keys become tables; second-level mappings become tables; inline tables start at third level.""" log.verbose("=" * 100) data = remove_none_values_from_dict(data=data) @@ -355,16 +355,16 @@ def dict_to_plx_styled_toml(cls, data: Mapping[str, Any]) -> str: document_root.add(section_key, table_obj_for_concept) toml_output = tomlkit.dumps(document_root) # pyright: ignore[reportUnknownMemberType] - if cls._plx_config().inline_tables.spaces_inside_curly_braces: + if cls._mthds_config().inline_tables.spaces_inside_curly_braces: return cls.add_spaces_to_inline_tables(toml_output) return toml_output @classmethod - def make_plx_content(cls, blueprint: PipelexBundleBlueprint) -> str: - # Use context to signal PLX format serialization to ConstructBlueprint + def make_mthds_content(cls, blueprint: PipelexBundleBlueprint) -> str: + # Use context to signal MTHDS format serialization to ConstructBlueprint blueprint_dict = blueprint.model_dump( serialize_as_any=True, by_alias=True, - context={"format": "plx"}, + context={"format": "mthds"}, ) - return 
cls.dict_to_plx_styled_toml(data=blueprint_dict) + return cls.dict_to_mthds_styled_toml(data=blueprint_dict) diff --git a/pipelex/language/plx_config.py b/pipelex/language/plx_config.py deleted file mode 100644 index 639fb40cc..000000000 --- a/pipelex/language/plx_config.py +++ /dev/null @@ -1,28 +0,0 @@ -from pipelex.system.configuration.config_model import ConfigModel - - -class PlxConfigStrings(ConfigModel): - prefer_literal: bool - force_multiline: bool - length_limit_to_multiline: int - ensure_trailing_newline: bool - ensure_leading_blank_line: bool - - -class PlxConfigInlineTables(ConfigModel): - spaces_inside_curly_braces: bool - - -class PlxConfigForConcepts(ConfigModel): - structure_field_ordering: list[str] - - -class PlxConfigForPipes(ConfigModel): - field_ordering: list[str] - - -class PlxConfig(ConfigModel): - strings: PlxConfigStrings - inline_tables: PlxConfigInlineTables - concepts: PlxConfigForConcepts - pipes: PlxConfigForPipes diff --git a/pipelex/libraries/library.py b/pipelex/libraries/library.py index c1e9faf13..fcf647ae6 100644 --- a/pipelex/libraries/library.py +++ b/pipelex/libraries/library.py @@ -27,7 +27,7 @@ class Library(BaseModel): domain_library: DomainLibrary concept_library: ConceptLibrary pipe_library: PipeLibrary - loaded_plx_paths: list[Path] = Field(default_factory=empty_list_factory_of(Path)) + loaded_mthds_paths: list[Path] = Field(default_factory=empty_list_factory_of(Path)) def get_domain_library(self) -> DomainLibrary: return self.domain_library @@ -42,7 +42,7 @@ def teardown(self) -> None: self.pipe_library.teardown() self.concept_library.teardown() self.domain_library.teardown() - self.loaded_plx_paths = [] + self.loaded_mthds_paths = [] def validate_library(self) -> None: self.validate_domain_library_with_libraries() diff --git a/pipelex/libraries/library_manager.py b/pipelex/libraries/library_manager.py index 7f5d697dc..95f1f2653 100644 --- a/pipelex/libraries/library_manager.py +++ b/pipelex/libraries/library_manager.py @@ -30,7 +30,7 @@ from pipelex.libraries.library_factory import LibraryFactory from pipelex.libraries.library_manager_abstract import LibraryManagerAbstract from pipelex.libraries.library_utils import ( - get_pipelex_plx_files_from_dirs, + get_pipelex_mthds_files_from_dirs, ) from pipelex.libraries.pipe.exceptions import PipeLibraryError from pipelex.system.registries.class_registry_utils import ClassRegistryUtils @@ -46,7 +46,7 @@ class LibraryManager(LibraryManagerAbstract): def __init__(self): # UNTITLED library is the fallback library for all others self._libraries: dict[str, Library] = {} - self._pipe_source_map: dict[str, Path] = {} # pipe_code -> source .plx file + self._pipe_source_map: dict[str, Path] = {} # pipe_code -> source .mthds file ############################################################ # Manager lifecycle @@ -122,7 +122,7 @@ def get_pipe_source(self, pipe_code: str) -> Path | None: pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. 
""" return self._pipe_source_map.get(pipe_code) @@ -146,25 +146,25 @@ def load_libraries( library_dirs = [] all_dirs: list[Path] = [] - all_plx_paths: list[Path] = [] + all_mthds_paths: list[Path] = [] all_dirs.extend(library_dirs) - all_plx_paths.extend(get_pipelex_plx_files_from_dirs(set(library_dirs))) + all_mthds_paths.extend(get_pipelex_mthds_files_from_dirs(set(library_dirs))) if library_file_paths: - all_plx_paths.extend(library_file_paths) + all_mthds_paths.extend(library_file_paths) # Combine and deduplicate seen_absolute_paths: set[str] = set() - valid_plx_paths: list[Path] = [] - for plx_path in all_plx_paths: + valid_mthds_paths: list[Path] = [] + for mthds_path in all_mthds_paths: try: - absolute_path = str(plx_path.resolve()) + absolute_path = str(mthds_path.resolve()) except (OSError, RuntimeError): # For paths that can't be resolved (e.g., in zipped packages), use string representation - absolute_path = str(plx_path) + absolute_path = str(mthds_path) if absolute_path not in seen_absolute_paths: - valid_plx_paths.append(plx_path) + valid_mthds_paths.append(mthds_path) seen_absolute_paths.add(absolute_path) # Import modules and register in global registries @@ -188,9 +188,9 @@ def load_libraries( ) log.verbose(f"Auto-registered {num_registered} StructuredContent classes from loaded modules") - # Load PLX files into the specific library - log.verbose(f"Loading plx files from: {[str(p) for p in valid_plx_paths]}") - return self._load_plx_files_into_library(library_id=library_id, valid_plx_paths=valid_plx_paths) + # Load MTHDS files into the specific library + log.verbose(f"Loading MTHDS files from: {[str(p) for p in valid_mthds_paths]}") + return self._load_mthds_files_into_library(library_id=library_id, valid_mthds_paths=valid_mthds_paths) @override def load_libraries_concepts_only( @@ -207,8 +207,8 @@ def load_libraries_concepts_only( Args: library_id: The ID of the library to load into - library_dirs: List of directories containing PLX files - library_file_paths: List of specific PLX file paths to load + library_dirs: List of directories containing MTHDS files + library_file_paths: List of specific MTHDS file paths to load Returns: List of all concepts that were loaded @@ -222,25 +222,25 @@ def load_libraries_concepts_only( library_dirs = [] all_dirs: list[Path] = [] - all_plx_paths: list[Path] = [] + all_mthds_paths: list[Path] = [] all_dirs.extend(library_dirs) - all_plx_paths.extend(get_pipelex_plx_files_from_dirs(set(library_dirs))) + all_mthds_paths.extend(get_pipelex_mthds_files_from_dirs(set(library_dirs))) if library_file_paths: - all_plx_paths.extend(library_file_paths) + all_mthds_paths.extend(library_file_paths) # Combine and deduplicate seen_absolute_paths: set[str] = set() - valid_plx_paths: list[Path] = [] - for plx_path in all_plx_paths: + valid_mthds_paths: list[Path] = [] + for mthds_path in all_mthds_paths: try: - absolute_path = str(plx_path.resolve()) + absolute_path = str(mthds_path.resolve()) except (OSError, RuntimeError): # For paths that can't be resolved (e.g., in zipped packages), use string representation - absolute_path = str(plx_path) + absolute_path = str(mthds_path) if absolute_path not in seen_absolute_paths: - valid_plx_paths.append(plx_path) + valid_mthds_paths.append(mthds_path) seen_absolute_paths.add(absolute_path) # Import modules and register in global registries @@ -260,19 +260,19 @@ def load_libraries_concepts_only( ) log.debug(f"Auto-registered {num_registered} StructuredContent classes from loaded modules") - # Load PLX files as 
concepts only (no pipes) - log.debug(f"Loading concepts only from plx files: {[str(p) for p in valid_plx_paths]}") + # Load MTHDS files as concepts only (no pipes) + log.debug(f"Loading concepts only from MTHDS files: {[str(p) for p in valid_mthds_paths]}") library = self.get_library(library_id=library_id) all_concepts: list[Concept] = [] - for plx_path in valid_plx_paths: + for mthds_path in valid_mthds_paths: # Track loaded path (resolve if possible) try: - resolved_path = plx_path.resolve() + resolved_path = mthds_path.resolve() except (OSError, RuntimeError): - resolved_path = plx_path - library.loaded_plx_paths.append(resolved_path) + resolved_path = mthds_path + library.loaded_mthds_paths.append(resolved_path) - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_path) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_path) concepts = self.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) all_concepts.extend(concepts) @@ -284,7 +284,7 @@ def load_from_blueprints(self, library_id: str, blueprints: list[PipelexBundleBl Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all pipes that were loaded @@ -370,7 +370,7 @@ def load_concepts_only_from_blueprints( Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all concepts that were loaded @@ -418,7 +418,7 @@ def _load_concepts_from_blueprints( later by _rebuild_models_with_forward_refs(). Args: - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of loaded concepts @@ -491,28 +491,28 @@ def _load_concepts_from_blueprints( # Private helper methods ############################################################ - def _load_plx_files_into_library(self, library_id: str, valid_plx_paths: list[Path]) -> list[PipeAbstract]: - """Load PLX files into a specific library. + def _load_mthds_files_into_library(self, library_id: str, valid_mthds_paths: list[Path]) -> list[PipeAbstract]: + """Load MTHDS files into a specific library. This method: - 1. Parses blueprints from PLX files + 1. Parses blueprints from MTHDS files 2. 
Loads blueprints into the specified library Args: library_id: The ID of the library to load into - valid_plx_paths: List of PLX file paths to load + valid_mthds_paths: List of MTHDS file paths to load """ blueprints: list[PipelexBundleBlueprint] = [] - for plx_file_path in valid_plx_paths: + for mthds_file_path in valid_mthds_paths: try: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) - blueprint.source = str(plx_file_path) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) + blueprint.source = str(mthds_file_path) except FileNotFoundError as file_not_found_error: - msg = f"Could not find PLX bundle at '{plx_file_path}'" + msg = f"Could not find MTHDS bundle at '{mthds_file_path}'" raise LibraryLoadingError(msg) from file_not_found_error except PipelexInterpreterError as interpreter_error: # Forward BLUEPRINT validation errors from interpreter - msg = f"Could not load PLX bundle from '{plx_file_path}' because of: {interpreter_error.message}" + msg = f"Could not load MTHDS bundle from '{mthds_file_path}' because of: {interpreter_error.message}" raise LibraryLoadingError( message=msg, blueprint_validation_errors=interpreter_error.validation_errors, @@ -521,18 +521,18 @@ def _load_plx_files_into_library(self, library_id: str, valid_plx_paths: list[Pa # Store resolved absolute paths for duplicate detection in the library library = self.get_library(library_id=library_id) - for plx_file_path in valid_plx_paths: + for mthds_file_path in valid_mthds_paths: try: - resolved_path = plx_file_path.resolve() + resolved_path = mthds_file_path.resolve() except (OSError, RuntimeError): - resolved_path = plx_file_path - library.loaded_plx_paths.append(resolved_path) + resolved_path = mthds_file_path + library.loaded_mthds_paths.append(resolved_path) try: return self.load_from_blueprints(library_id=library_id, blueprints=blueprints) except ValidationError as validation_error: - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) - msg = f"Could not load blueprints from {[str(pth) for pth in valid_plx_paths]} because of: {validation_error_msg}" + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) + msg = f"Could not load blueprints from {[str(pth) for pth in valid_mthds_paths]} because of: {validation_error_msg}" raise LibraryError( message=msg, ) from validation_error diff --git a/pipelex/libraries/library_manager_abstract.py b/pipelex/libraries/library_manager_abstract.py index b8b1abfcd..10fa677db 100644 --- a/pipelex/libraries/library_manager_abstract.py +++ b/pipelex/libraries/library_manager_abstract.py @@ -42,7 +42,7 @@ def get_pipe_source(self, pipe_code: str) -> Path | None: # noqa: ARG002 pipe_code: The pipe code to look up. Returns: - Path to the .plx file the pipe was loaded from, or None if unknown. + Path to the .mthds file the pipe was loaded from, or None if unknown. 
""" return None @@ -98,7 +98,7 @@ def load_libraries_concepts_only( Args: library_id: The ID of the library to load into - library_dirs: List of directories containing PLX files + library_dirs: List of directories containing MTHDS files library_file_paths: List of specific PLX file paths to load Returns: diff --git a/pipelex/libraries/library_utils.py b/pipelex/libraries/library_utils.py index 4af6521f9..a3bc1a8af 100644 --- a/pipelex/libraries/library_utils.py +++ b/pipelex/libraries/library_utils.py @@ -4,25 +4,25 @@ from pipelex import log from pipelex.builder import builder from pipelex.config import get_config -from pipelex.core.interpreter.helpers import is_pipelex_file +from pipelex.core.interpreter.helpers import MTHDS_EXTENSION, is_pipelex_file from pipelex.tools.misc.file_utils import find_files_in_dir from pipelex.types import Traversable -def get_pipelex_plx_files_from_package() -> list[Path]: - """Get all PLX files from the pipelex package using importlib.resources. +def get_pipelex_mthds_files_from_package() -> list[Path]: + """Get all MTHDS files from the pipelex package using importlib.resources. This works reliably whether pipelex is installed as a wheel, from source, or as a relative path import. Returns: - List of Path objects to PLX files in pipelex package + List of Path objects to MTHDS files in pipelex package """ - plx_files: list[Path] = [] + mthds_files: list[Path] = [] pipelex_package = files("pipelex") - def _find_plx_in_traversable(traversable: Traversable, collected: list[Path]) -> None: - """Recursively find .plx files in a Traversable.""" + def _find_mthds_in_traversable(traversable: Traversable, collected: list[Path]) -> None: + """Recursively find .mthds files in a Traversable.""" excluded_dirs = get_config().pipelex.scan_config.excluded_dirs try: if not traversable.is_dir(): @@ -30,19 +30,19 @@ def _find_plx_in_traversable(traversable: Traversable, collected: list[Path]) -> for child in traversable.iterdir(): if child.is_file() and is_pipelex_file(Path(child.name)): - plx_path_str = str(child) - collected.append(Path(plx_path_str)) - log.verbose(f"Found pipelex package PLX file: {plx_path_str}") + mthds_path_str = str(child) + collected.append(Path(mthds_path_str)) + log.verbose(f"Found pipelex package MTHDS file: {mthds_path_str}") elif child.is_dir(): # Skip excluded directories if child.name not in excluded_dirs: - _find_plx_in_traversable(child, collected) + _find_mthds_in_traversable(child, collected) except (PermissionError, OSError) as exc: log.warning(f"Could not access {traversable}: {exc}") - _find_plx_in_traversable(pipelex_package, plx_files) - log.verbose(f"Found {len(plx_files)} PLX files in pipelex package") - return plx_files + _find_mthds_in_traversable(pipelex_package, mthds_files) + log.verbose(f"Found {len(mthds_files)} MTHDS files in pipelex package") + return mthds_files def get_pipelex_package_dir_for_imports() -> Path | None: @@ -62,27 +62,27 @@ def get_pipelex_package_dir_for_imports() -> Path | None: return None -def get_pipelex_plx_files_from_dirs(dirs: set[Path]) -> list[Path]: - """Get all valid Pipelex PLX files from the given directories.""" - all_plx_paths: list[Path] = [] +def get_pipelex_mthds_files_from_dirs(dirs: set[Path]) -> list[Path]: + """Get all valid Pipelex MTHDS files from the given directories.""" + all_mthds_paths: list[Path] = [] for dir_path in dirs: if not dir_path.exists(): log.debug(f"Directory does not exist, skipping: {dir_path}") continue - # Find all .plx files in the directory, excluding problematic 
directories - plx_files = find_files_in_dir( + # Find all .mthds files in the directory, excluding problematic directories + mthds_files = find_files_in_dir( dir_path=str(dir_path), - pattern="*.plx", + pattern=f"*{MTHDS_EXTENSION}", excluded_dirs=list(get_config().pipelex.scan_config.excluded_dirs), force_include_dirs=[str(Path(builder.__file__).parent)], ) # Filter to only include valid Pipelex files - for plx_file in plx_files: - if is_pipelex_file(plx_file): - all_plx_paths.append(plx_file) + for mthds_file in mthds_files: + if is_pipelex_file(mthds_file): + all_mthds_paths.append(mthds_file) else: - log.debug(f"Skipping non-Pipelex PLX file: {plx_file}") - return all_plx_paths + log.debug(f"Skipping non-Pipelex MTHDS file: {mthds_file}") + return all_mthds_paths diff --git a/pipelex/pipe_operators/compose/construct_blueprint.py b/pipelex/pipe_operators/compose/construct_blueprint.py index f88b5024e..b954b1162 100644 --- a/pipelex/pipe_operators/compose/construct_blueprint.py +++ b/pipelex/pipe_operators/compose/construct_blueprint.py @@ -282,10 +282,10 @@ def to_plx_dict(self) -> dict[str, Any]: def serialize_with_context(self, handler: SerializerFunctionWrapHandler, info: SerializationInfo) -> dict[str, Any]: """Serialize with format-aware context. - When context contains {"format": "plx"}, outputs PLX-format dict. + When context contains {"format": "mthds"}, outputs MTHDS-format dict. Otherwise, uses default Pydantic serialization. """ - if info.context and info.context.get("format") == "plx": + if info.context and info.context.get("format") == "mthds": return self.to_plx_dict() result = handler(self) return dict(result) # Ensure dict return type diff --git a/pipelex/pipelex.toml b/pipelex/pipelex.toml index 3b0dcd7ff..9fd783005 100644 --- a/pipelex/pipelex.toml +++ b/pipelex/pipelex.toml @@ -391,23 +391,23 @@ image_urls = [ ] #################################################################################################### -# PLX config +# MTHDS config #################################################################################################### -[pipelex.plx_config.inline_tables] +[pipelex.mthds_config.inline_tables] spaces_inside_curly_braces = true -[pipelex.plx_config.strings] +[pipelex.mthds_config.strings] prefer_literal = false force_multiline = false length_limit_to_multiline = 100 ensure_trailing_newline = true ensure_leading_blank_line = true -[pipelex.plx_config.concepts] +[pipelex.mthds_config.concepts] structure_field_ordering = ["type", "concept_ref", "item_type", "item_concept_ref", "description", "choices", "required"] -[pipelex.plx_config.pipes] +[pipelex.mthds_config.pipes] field_ordering = ["type", "description", "inputs", "output"] #################################################################################################### @@ -423,7 +423,7 @@ llm_handle = "model" llm = "model" llm_to_structure = "model_to_structure" -[migration.migration_maps.plx] +[migration.migration_maps.mthds] img_gen = "model" ocr = "model" llm_handle = "model" diff --git a/pipelex/pipeline/pipeline_run_setup.py b/pipelex/pipeline/pipeline_run_setup.py index b5ab958a1..4ab943373 100644 --- a/pipelex/pipeline/pipeline_run_setup.py +++ b/pipelex/pipeline/pipeline_run_setup.py @@ -80,17 +80,17 @@ async def pipeline_run_setup( pipe_code: Code identifying the pipe to execute. Required when ``plx_content`` is not provided. 
When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any + specified pipe from the MTHDS content will be executed (overriding any ``main_pipe`` defined in the content). plx_content: - Complete PLX file content as a string. The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. + Complete MTHDS file content as a string. The pipe to execute is determined by + ``pipe_code`` (if provided) or the ``main_pipe`` property in the MTHDS content. Can be combined with ``library_dirs`` to load additional definitions. bundle_uri: URI identifying the bundle. Used to detect if the bundle was already loaded from library directories (e.g., via PIPELEXPATH) to avoid duplicate domain registration. If provided and the resolved absolute path is already in the - loaded PLX paths, the ``plx_content`` loading will be skipped. + loaded MTHDS paths, the ``plx_content`` loading will be skipped. inputs: Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary or a ``WorkingMemory`` instance. @@ -159,11 +159,11 @@ async def pipeline_run_setup( try: resolved_bundle_uri = Path(bundle_uri).resolve() except (OSError, RuntimeError): - # Use str(Path(...)) to normalize the path (e.g., "./file.plx" -> "file.plx") - # to match the normalization done in library_manager._load_plx_files_into_library + # Use str(Path(...)) to normalize the path (e.g., "./file.mthds" -> "file.mthds") + # to match the normalization done in library_manager._load_mthds_files_into_library resolved_bundle_uri = Path(bundle_uri) current_library = library_manager.get_library(library_id=library_id) - bundle_already_loaded = resolved_bundle_uri in current_library.loaded_plx_paths + bundle_already_loaded = resolved_bundle_uri in current_library.loaded_mthds_paths if bundle_already_loaded: log.verbose(f"Bundle '{bundle_uri}' already loaded from library directories, skipping duplicate load") @@ -177,7 +177,7 @@ async def pipeline_run_setup( elif blueprint.main_pipe: pipe = get_required_pipe(pipe_code=blueprint.main_pipe) else: - msg = "No pipe code or main pipe in the PLX content provided to the pipeline API." + msg = "No pipe code or main pipe in the MTHDS content provided to the pipeline API." 
raise PipeExecutionError(message=msg) elif pipe_code: pipe = get_required_pipe(pipe_code=pipe_code) diff --git a/pipelex/pipeline/validate_bundle.py b/pipelex/pipeline/validate_bundle.py index e7a068300..fe6172854 100644 --- a/pipelex/pipeline/validate_bundle.py +++ b/pipelex/pipeline/validate_bundle.py @@ -20,7 +20,7 @@ from pipelex.core.pipes.pipe_abstract import PipeAbstract from pipelex.core.validation import report_validation_error from pipelex.hub import get_library_manager, resolve_library_dirs, set_current_library -from pipelex.libraries.library_utils import get_pipelex_plx_files_from_dirs +from pipelex.libraries.library_utils import get_pipelex_mthds_files_from_dirs from pipelex.pipe_run.dry_run import DryRunError, DryRunOutput, dry_run_pipes from pipelex.pipe_run.exceptions import PipeRunError @@ -133,7 +133,7 @@ async def validate_bundle( blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_plx_paths: + if plx_file_path.resolve() not in library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_pipes = library_manager.load_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: @@ -163,7 +163,7 @@ async def validate_bundle( ) from pipe_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, @@ -182,15 +182,15 @@ async def validate_bundle( async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResult: - plx_files = get_pipelex_plx_files_from_dirs(dirs={directory}) + mthds_files = get_pipelex_mthds_files_from_dirs(dirs={directory}) all_blueprints: list[PipelexBundleBlueprint] = [] library_manager = get_library_manager() library_id, _ = library_manager.open_library() set_current_library(library_id=library_id) try: - for plx_file in plx_files: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file) + for mthds_file in mthds_files: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) all_blueprints.append(blueprint) loaded_pipes = library_manager.load_libraries(library_id=library_id, library_dirs=[Path(directory)]) @@ -214,7 +214,7 @@ async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResu ) from pipe_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, @@ -234,7 +234,7 @@ async def validate_bundles_from_directory(directory: Path) -> ValidateBundleResu class LoadConceptsOnlyResult(BaseModel): - """Result of loading PLX files with concepts only (no pipes).""" + """Result of loading MTHDS files with concepts only (no pipes).""" blueprints: list[PipelexBundleBlueprint] concepts: list[Concept] @@ -246,17 +246,17 @@ def load_concepts_only( 
blueprints: list[PipelexBundleBlueprint] | None = None, library_dirs: Sequence[Path] | None = None, ) -> LoadConceptsOnlyResult: - """Load PLX files processing only domains and concepts, skipping pipes. + """Load MTHDS files processing only domains and concepts, skipping pipes. This is a lightweight alternative to validate_bundle() that only processes domains and concepts. It does not load pipes, does not perform pipe validation, and does not run dry runs. Args: - plx_file_path: Path to a single PLX file to load (mutually exclusive with others) - plx_content: PLX content string to load (mutually exclusive with others) + plx_file_path: Path to a single MTHDS file to load (mutually exclusive with others) + plx_content: MTHDS content string to load (mutually exclusive with others) blueprints: Pre-parsed blueprints to load (mutually exclusive with others) - library_dirs: Optional directories containing additional PLX library files + library_dirs: Optional directories containing additional MTHDS library files Returns: LoadConceptsOnlyResult with blueprints and loaded concepts @@ -307,7 +307,7 @@ def load_concepts_only( blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_plx_paths: + if plx_file_path.resolve() not in library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: @@ -324,7 +324,7 @@ def load_concepts_only( ) from interpreter_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, @@ -333,14 +333,14 @@ def load_concepts_only( def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult: - """Load PLX files from a directory, processing only domains and concepts, skipping pipes. + """Load MTHDS files from a directory, processing only domains and concepts, skipping pipes. This is a lightweight alternative to validate_bundles_from_directory() that only processes domains and concepts. It does not load pipes, does not perform pipe validation, and does not run dry runs. 
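A minimal sketch of how this lightweight loader can be exercised against one of the renamed `.mthds` fixtures, assuming a Pipelex library has already been opened (as the surrounding tests do via their fixtures) and that the import path below matches the module edited here; the keyword argument is still named `plx_file_path` in this patch.

from pathlib import Path

from pipelex.pipeline.validate_bundle import LoadConceptsOnlyResult, load_concepts_only

# Assumes the Pipelex runtime/library has been initialized beforehand, as in the tests below.
result: LoadConceptsOnlyResult = load_concepts_only(
    plx_file_path=Path("tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds"),
)
# Only domains and concepts are processed; no pipes are loaded and no dry run is performed.
print(f"{len(result.blueprints)} blueprint(s), {len(result.concepts)} concept(s)")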
Args: - directory: Directory containing PLX files to load + directory: Directory containing MTHDS files to load Returns: LoadConceptsOnlyResult with blueprints and loaded concepts @@ -348,15 +348,15 @@ def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult Raises: ValidateBundleError: If loading fails due to interpreter or validation errors """ - plx_files = get_pipelex_plx_files_from_dirs(dirs={directory}) + mthds_files = get_pipelex_mthds_files_from_dirs(dirs={directory}) all_blueprints: list[PipelexBundleBlueprint] = [] library_manager = get_library_manager() library_id, _ = library_manager.open_library() set_current_library(library_id=library_id) try: - for plx_file in plx_files: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file) + for mthds_file in mthds_files: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file) all_blueprints.append(blueprint) loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=all_blueprints) @@ -367,7 +367,7 @@ def load_concepts_only_from_directory(directory: Path) -> LoadConceptsOnlyResult ) from interpreter_error except ValidationError as validation_error: pipe_validation_errors = categorize_pipe_validation_error(validation_error=validation_error) - validation_error_msg = report_validation_error(category="plx", validation_error=validation_error) + validation_error_msg = report_validation_error(category="mthds", validation_error=validation_error) msg = f"Could not load blueprints because of: {validation_error_msg}" raise ValidateBundleError( message=msg, diff --git a/pipelex/system/configuration/configs.py b/pipelex/system/configuration/configs.py index d4b1c5880..f99c33171 100644 --- a/pipelex/system/configuration/configs.py +++ b/pipelex/system/configuration/configs.py @@ -6,7 +6,7 @@ from pipelex.cogt.model_backends.prompting_target import PromptingTarget from pipelex.cogt.templating.templating_style import TemplatingStyle from pipelex.graph.graph_config import GraphConfig -from pipelex.language.plx_config import PlxConfig +from pipelex.language.mthds_config import MthdsConfig from pipelex.system.configuration.config_model import ConfigModel from pipelex.system.configuration.config_root import ConfigRoot from pipelex.tools.aws.aws_config import AwsConfig @@ -184,7 +184,7 @@ class Pipelex(ConfigModel): structure_config: StructureConfig prompting_config: PromptingConfig - plx_config: PlxConfig + mthds_config: MthdsConfig dry_run_config: DryRunConfig pipe_run_config: PipeRunConfig diff --git a/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx b/tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds similarity index 100% rename from tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx rename to tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds diff --git a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py index 3707549bb..819da6a5d 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py @@ -33,22 +33,22 @@ async def test_generate_and_import_nested_concept_structures(self): """Test that generated structure files for nested concepts are importable and usable. This test: - 1. Uses the existing nested_concepts.plx file with concept-to-concept references + 1. 
Uses the existing nested_concepts.mthds file with concept-to-concept references 2. Generates Python structure files via the CLI helper function 3. Dynamically imports the generated modules 4. Instantiates the generated classes 5. Verifies nested concept references work correctly """ - # Path to the PLX file with nested concepts - plx_file_path = Path("tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.plx").resolve() - assert plx_file_path.exists(), f"PLX file not found: {plx_file_path}" + # Path to the MTHDS file with nested concepts + mthds_file_path = Path("tests/e2e/pipelex/concepts/nested_concepts/nested_concepts.mthds").resolve() + assert mthds_file_path.exists(), f"MTHDS file not found: {mthds_file_path}" # Create a temporary directory for generated structures with tempfile.TemporaryDirectory() as temp_dir: output_directory = Path(temp_dir) - # Validate the PLX file to get blueprints - validate_result = await validate_bundle(plx_file_path=plx_file_path) + # Validate the MTHDS file to get blueprints + validate_result = await validate_bundle(plx_file_path=mthds_file_path) blueprints = validate_result.blueprints # Generate structure files diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/cv_batch.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/joke_batch.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py index eb44cc17c..939065885 100644 --- a/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py +++ b/tests/e2e/pipelex/pipes/pipe_controller/pipe_batch/test_pipe_batch_graph.py @@ -152,7 +152,7 @@ async def test_pipe_batch_generates_batch_edges(self, pipe_run_mode: PipeRunMode ) async def test_joke_batch_graph_outputs(self, pipe_run_mode: PipeRunMode): - """Simple test that runs joke_batch.plx and generates all graph outputs. + """Simple test that runs joke_batch.mthds and generates all graph outputs. 
This test runs the joke batch pipeline with graph tracing and generates: - graph.json (GraphSpec) diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/discord_newsletter.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.plx rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_sequence/test_tweet.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_match.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_img_gen/pipe_img_gen.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_document_inputs.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_filename_html.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_image_inputs.mthds diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.plx b/tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.mthds similarity index 100% rename from tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.plx rename to tests/e2e/pipelex/pipes/pipe_operators/pipe_llm/pipe_llm_vision.mthds diff --git a/tests/integration/pipelex/builder/test_builder_plx_validation.py b/tests/integration/pipelex/builder/test_builder_mthds_validation.py similarity index 78% rename from tests/integration/pipelex/builder/test_builder_plx_validation.py rename to tests/integration/pipelex/builder/test_builder_mthds_validation.py index 68be4481c..b748a8ab9 100644 --- a/tests/integration/pipelex/builder/test_builder_plx_validation.py +++ b/tests/integration/pipelex/builder/test_builder_mthds_validation.py @@ -1,6 +1,6 @@ -"""Tests for validating builder domain PLX files. +"""Tests for validating builder domain MTHDS files. 
-This module tests that builder.plx and agentic_builder.plx are valid and that +This module tests that builder.mthds and agentic_builder.mthds are valid and that input/output types are correctly declared, especially for pipes that receive batched outputs (lists) from previous steps. """ @@ -18,21 +18,21 @@ class TestData: - """Test data for builder PLX validation tests.""" + """Test data for builder MTHDS validation tests.""" - BUILDER_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "builder.plx" - AGENTIC_BUILDER_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "agentic_builder.plx" - PIPE_DESIGN_PLX_PATH: ClassVar[Path] = BUILDER_DIR / "pipe" / "pipe_design.plx" + BUILDER_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "builder.mthds" + AGENTIC_BUILDER_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "agentic_builder.mthds" + PIPE_DESIGN_MTHDS_PATH: ClassVar[Path] = BUILDER_DIR / "pipe" / "pipe_design.mthds" -class TestBuilderPlxValidation: - """Tests that builder domain PLX files are valid and type-consistent.""" +class TestBuilderMthdsValidation: + """Tests that builder domain MTHDS files are valid and type-consistent.""" @pytest.mark.asyncio(loop_scope="class") - async def test_builder_plx_loads_and_validates(self): - """Test that builder.plx can be loaded and validated successfully.""" + async def test_builder_mthds_loads_and_validates(self): + """Test that builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.BUILDER_PLX_PATH, + plx_file_path=TestData.BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -42,10 +42,10 @@ async def test_builder_plx_loads_and_validates(self): assert len(result.pipes) > 0 @pytest.mark.asyncio(loop_scope="class") - async def test_agentic_builder_plx_loads_and_validates(self): - """Test that agentic_builder.plx can be loaded and validated successfully.""" + async def test_agentic_builder_mthds_loads_and_validates(self): + """Test that agentic_builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.AGENTIC_BUILDER_PLX_PATH, + plx_file_path=TestData.AGENTIC_BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -55,10 +55,10 @@ async def test_agentic_builder_plx_loads_and_validates(self): assert len(result.pipes) > 0 @pytest.mark.asyncio(loop_scope="class") - async def test_pipe_design_plx_loads_and_validates(self): - """Test that pipe_design.plx can be loaded and validated successfully.""" + async def test_pipe_design_mthds_loads_and_validates(self): + """Test that pipe_design.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.PIPE_DESIGN_PLX_PATH, + plx_file_path=TestData.PIPE_DESIGN_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -68,15 +68,15 @@ async def test_pipe_design_plx_loads_and_validates(self): assert len(result.pipes) > 0 def test_assemble_pipelex_bundle_spec_has_list_inputs_in_builder(self): - """Test that assemble_pipelex_bundle_spec declares list inputs correctly in builder.plx. + """Test that assemble_pipelex_bundle_spec declares list inputs correctly in builder.mthds. This test catches the bug where pipe_specs was incorrectly declared as "pipe_design.PipeSpec" instead of "pipe_design.PipeSpec[]" when the pipe receives the output of a batch_over operation which produces a list. 
- See: builder.plx line 31 (batch_over produces list) and line 332 (input declaration) + See: builder.mthds line 31 (batch_over produces list) and line 332 (input declaration) """ - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_MTHDS_PATH) assert blueprint.pipe is not None assert "assemble_pipelex_bundle_spec" in blueprint.pipe @@ -95,12 +95,12 @@ def test_assemble_pipelex_bundle_spec_has_list_inputs_in_builder(self): assert "[]" in concept_specs_input, f"concept_specs must be declared as a list (with []). Got: {concept_specs_input}" def test_detail_all_pipe_specs_outputs_list_in_agentic_builder(self): - """Test that detail_all_pipe_specs declares list output in agentic_builder.plx. + """Test that detail_all_pipe_specs declares list output in agentic_builder.mthds. This test verifies that the PipeBatch that generates pipe_specs correctly declares its output as a list, which is then consumed by assemble_pipelex_bundle_spec. """ - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.AGENTIC_BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.AGENTIC_BUILDER_MTHDS_PATH) assert blueprint.pipe is not None assert "detail_all_pipe_specs" in blueprint.pipe @@ -114,10 +114,10 @@ def test_detail_all_pipe_specs_outputs_list_in_agentic_builder(self): def test_batch_over_result_consistency_with_subsequent_inputs(self): """Test that batch_over results are consumed by pipes with matching list inputs. - In builder.plx, pipe_builder uses batch_over on detail_pipe_spec to produce pipe_specs. + In builder.mthds, pipe_builder uses batch_over on detail_pipe_spec to produce pipe_specs. The subsequent assemble_pipelex_bundle_spec must declare pipe_specs as a list input. 
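The same invariant can be spot-checked outside the test suite by parsing the bundle into a blueprint and confirming the consumer's input carries the `[]` list marker. `make_pipelex_bundle_blueprint`, the builder bundle path, and the `[]` convention are taken from this diff; the exact import path of `PipelexInterpreter` and the shape of the pipe entry (`inputs` mapping) are assumptions.

from pathlib import Path

from pipelex.core.interpreter import PipelexInterpreter  # import path assumed

blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(
    bundle_path=Path("pipelex/builder/builder.mthds")
)
assert blueprint.pipe is not None and "assemble_pipelex_bundle_spec" in blueprint.pipe
# The consumer of a batch_over output must declare its input as a list,
# e.g. "pipe_design.PipeSpec[]" rather than "pipe_design.PipeSpec".
pipe_entry = blueprint.pipe["assemble_pipelex_bundle_spec"]  # mapping access assumed
assert "[]" in str(pipe_entry.inputs["pipe_specs"])  # `inputs` attribute assumed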
""" - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_PLX_PATH) + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=TestData.BUILDER_MTHDS_PATH) assert blueprint.pipe is not None diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.plx b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.mthds similarity index 100% rename from tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/multi_file/base_domain.mthds diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.plx b/tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.mthds similarity index 100% rename from tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/multi_file/middle_domain.mthds diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.plx b/tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.mthds similarity index 100% rename from tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.plx rename to tests/integration/pipelex/concepts/out_of_order_refines/out_of_order_refines.mthds diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py index 717428a65..120669374 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py +++ b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py @@ -13,7 +13,7 @@ async def test_simple_out_of_order_refines_single_file(self): """Test that concept loading fails when refining concept is defined before base (single file). This test reproduces the bug where: - 1. VIPCustomer is defined BEFORE Customer in the PLX file + 1. VIPCustomer is defined BEFORE Customer in the MTHDS file 2. VIPCustomer refines Customer 3. When loading concepts, VIPCustomer is processed first 4. ConceptFactory._handle_refines tries to generate a structure class @@ -21,30 +21,30 @@ async def test_simple_out_of_order_refines_single_file(self): 5. Customer's class isn't registered yet, so lookup fails 6. Error: "Base class 'Customer' not found in native classes or class registry" """ - plx_file_path = Path(__file__).parent / "out_of_order_refines.plx" - assert plx_file_path.exists(), f"PLX file not found: {plx_file_path}" + mthds_file_path = Path(__file__).parent / "out_of_order_refines.mthds" + assert mthds_file_path.exists(), f"MTHDS file not found: {mthds_file_path}" # validate_bundle internally loads libraries which triggers ConceptFactory.make_from_blueprint # This should fail because VIPCustomer is defined before Customer # with pytest.raises(ConceptFactoryError) as exc_info: - await validate_bundle(plx_file_path=plx_file_path) + await validate_bundle(plx_file_path=mthds_file_path) async def test_multi_level_out_of_order_refines_across_files(self): """Test multi-level refinement chain fails when concepts are out of order across files. 
This test reproduces a more complex scenario where: - File 1 (base_domain.plx): + File 1 (base_domain.mthds): - Person (root concept with structure) - File 2 (middle_domain.plx) - concepts defined in REVERSE order: + File 2 (middle_domain.mthds) - concepts defined in REVERSE order: - PlatinumCustomer refines VIPCustomer (defined FIRST) - VIPCustomer refines Customer (defined SECOND) - Customer refines Person (defined THIRD) The inheritance chain is: PlatinumCustomer -> VIPCustomer -> Customer -> Person - When loading middle_domain.plx: + When loading middle_domain.mthds: 1. PlatinumCustomer is processed first 2. It tries to refine VIPCustomer, but VIPCustomer is not yet registered 3. Error: "Base class 'VIPCustomer' not found in native classes or class registry" @@ -56,10 +56,10 @@ async def test_multi_level_out_of_order_refines_across_files(self): """ multi_file_dir = Path(__file__).parent / "multi_file" assert multi_file_dir.exists(), f"Multi-file test directory not found: {multi_file_dir}" - assert (multi_file_dir / "base_domain.plx").exists(), "base_domain.plx not found" - assert (multi_file_dir / "middle_domain.plx").exists(), "middle_domain.plx not found" + assert (multi_file_dir / "base_domain.mthds").exists(), "base_domain.mthds not found" + assert (multi_file_dir / "middle_domain.mthds").exists(), "middle_domain.mthds not found" - # validate_bundles_from_directory loads all PLX files in the directory - # Files are loaded in order, but within middle_domain.plx concepts are out of order + # validate_bundles_from_directory loads all MTHDS files in the directory + # Files are loaded in order, but within middle_domain.mthds concepts are out of order # with pytest.raises(ConceptFactoryError) as exc_info: await validate_bundles_from_directory(directory=multi_file_dir) diff --git a/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.plx b/tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.mthds similarity index 100% rename from tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.plx rename to tests/integration/pipelex/concepts/refines_custom_concept/refines_custom_concept.mthds diff --git a/tests/integration/pipelex/language/test_mthds_factory.py b/tests/integration/pipelex/language/test_mthds_factory.py new file mode 100644 index 000000000..4f072be68 --- /dev/null +++ b/tests/integration/pipelex/language/test_mthds_factory.py @@ -0,0 +1,15 @@ +import pytest + +from pipelex import pretty_print +from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint +from pipelex.language.mthds_factory import MthdsFactory +from tests.unit.pipelex.core.test_data import InterpreterTestCases + + +class TestMthdsFactoryIntegration: + @pytest.mark.parametrize(("test_name", "expected_mthds_content", "blueprint"), InterpreterTestCases.VALID_TEST_CASES) + def test_make_mthds_content(self, test_name: str, expected_mthds_content: str, blueprint: PipelexBundleBlueprint): + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) + pretty_print(mthds_content, title=f"MTHDS content {test_name}") + pretty_print(expected_mthds_content, title=f"Expected MTHDS content {test_name}") + assert mthds_content == expected_mthds_content diff --git a/tests/integration/pipelex/language/test_plx_factory.py b/tests/integration/pipelex/language/test_plx_factory.py deleted file mode 100644 index 8930a5473..000000000 --- a/tests/integration/pipelex/language/test_plx_factory.py +++ /dev/null @@ -1,15 +0,0 @@ 
-import pytest - -from pipelex import pretty_print -from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.language.plx_factory import PlxFactory -from tests.unit.pipelex.core.test_data import InterpreterTestCases - - -class TestPlxFactoryIntegration: - @pytest.mark.parametrize(("test_name", "expected_plx_content", "blueprint"), InterpreterTestCases.VALID_TEST_CASES) - def test_make_plx_content(self, test_name: str, expected_plx_content: str, blueprint: PipelexBundleBlueprint): - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) - pretty_print(plx_content, title=f"Plx content {test_name}") - pretty_print(expected_plx_content, title=f"Expected PLX content {test_name}") - assert plx_content == expected_plx_content diff --git a/tests/integration/pipelex/libraries/test_concept_to_concept_references.py b/tests/integration/pipelex/libraries/test_concept_to_concept_references.py index 5a4e61c4d..0069729f7 100644 --- a/tests/integration/pipelex/libraries/test_concept_to_concept_references.py +++ b/tests/integration/pipelex/libraries/test_concept_to_concept_references.py @@ -1,4 +1,4 @@ -"""Integration tests for concept-to-concept references in PLX files.""" +"""Integration tests for concept-to-concept references in MTHDS files.""" import tempfile from collections.abc import Callable @@ -14,8 +14,8 @@ class TestConceptToConceptReferences: def test_load_concepts_with_single_reference(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts where one concept references another.""" - # Create a temporary PLX file with concept references - plx_content = """ + # Create a temporary MTHDS file with concept references + mthds_content = """ domain = "testapp" description = "Test domain for concept references" @@ -35,8 +35,8 @@ def test_load_concepts_with_single_reference(self, load_test_library: Callable[[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -60,7 +60,7 @@ def test_load_concepts_with_single_reference(self, load_test_library: Callable[[ def test_load_concepts_with_list_of_references(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts where one concept has a list of references to another.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain for list of concept references" @@ -81,8 +81,8 @@ def test_load_concepts_with_list_of_references(self, load_test_library: Callable """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -103,8 +103,8 @@ def test_load_concepts_with_list_of_references(self, load_test_library: Callable def test_load_concepts_dependency_order(self, load_test_library: Callable[[list[Path]], None]): """Test that concepts are loaded in dependency order (dependencies first).""" - # Define concepts in reverse dependency order in the PLX file - plx_content = """ + # Define concepts in reverse dependency order in the MTHDS file + mthds_content = """ domain = "testapp" description = "Test domain for dependency ordering" @@ -124,8 +124,8 @@ def 
test_load_concepts_dependency_order(self, load_test_library: Callable[[list[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") # This should not raise an error - Customer should be loaded before Invoice load_test_library([Path(tmp_dir)]) @@ -142,7 +142,7 @@ def test_load_concepts_dependency_order(self, load_test_library: Callable[[list[ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts with chain dependencies: A -> B -> C.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain for chain dependencies" @@ -168,8 +168,8 @@ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[lis """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") load_test_library([Path(tmp_dir)]) @@ -187,7 +187,7 @@ def test_load_concepts_chain_dependencies(self, load_test_library: Callable[[lis def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str]): """Test that cyclic dependencies are detected and raise an error.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cyclic dependencies" @@ -205,8 +205,8 @@ def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -220,7 +220,7 @@ def test_cycle_detection_raises_error(self, load_empty_library: Callable[[], str def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], str]): """Test that a concept referencing itself is detected as a cycle.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with self-referencing concept" @@ -233,8 +233,8 @@ def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -247,7 +247,7 @@ def test_cycle_detection_self_reference(self, load_empty_library: Callable[[], s def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], str]): """Test that a cycle through three concepts (A -> B -> C -> A) is detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with three-concept cycle" @@ -271,8 +271,8 @@ def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + 
mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -285,7 +285,7 @@ def test_cycle_detection_three_concepts(self, load_empty_library: Callable[[], s def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]): """Test that a cycle through many concepts (A -> B -> C -> D -> E -> A) is detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with long chain cycle" @@ -316,8 +316,8 @@ def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]) """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -330,7 +330,7 @@ def test_cycle_detection_long_chain(self, load_empty_library: Callable[[], str]) def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[], str]): """Test that cycles through list fields are detected.""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycle through list field" @@ -350,8 +350,8 @@ def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[ """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -364,7 +364,7 @@ def test_cycle_detection_through_list_field(self, load_empty_library: Callable[[ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callable[[], str]): """Test cycle detection when cycle is not at the start (D -> E -> F -> D, with A -> B -> C -> D).""" - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycle deeper in the graph" @@ -400,8 +400,8 @@ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callab """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test_concepts.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test_concepts.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") library_id = load_empty_library() library_manager = get_library_manager() @@ -414,7 +414,7 @@ def test_cycle_detection_partial_cycle_in_graph(self, load_empty_library: Callab def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[Path]], None]): """Test loading concepts with cross-domain references.""" - crm_plx = """ + crm_mthds = """ domain = "crm" description = "CRM domain" @@ -425,7 +425,7 @@ def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[ name = { type = "text", description = "Customer name" } """ - accounting_plx = """ + accounting_mthds = """ domain = "accounting" description = "Accounting domain" @@ -438,11 +438,11 @@ def test_cross_domain_concept_reference(self, load_test_library: Callable[[list[ """ with tempfile.TemporaryDirectory() as tmp_dir: - crm_path = Path(tmp_dir) / "crm.plx" - crm_path.write_text(crm_plx, encoding="utf-8") + crm_path = Path(tmp_dir) / "crm.mthds" + crm_path.write_text(crm_mthds, encoding="utf-8") - 
accounting_path = Path(tmp_dir) / "accounting.plx" - accounting_path.write_text(accounting_plx, encoding="utf-8") + accounting_path = Path(tmp_dir) / "accounting.mthds" + accounting_path.write_text(accounting_mthds, encoding="utf-8") load_test_library([Path(tmp_dir)]) diff --git a/tests/integration/pipelex/pipeline/test_load_concepts_only.py b/tests/integration/pipelex/pipeline/test_load_concepts_only.py index e4ee9f9bf..a651bfaba 100644 --- a/tests/integration/pipelex/pipeline/test_load_concepts_only.py +++ b/tests/integration/pipelex/pipeline/test_load_concepts_only.py @@ -1,4 +1,4 @@ -"""Integration tests for load_concepts_only functions.""" +"""Integration tests for load_concepts_only functions from MTHDS files.""" import tempfile from collections.abc import Callable @@ -15,12 +15,12 @@ class TestLoadConceptsOnly: - """Integration tests for loading concepts only (no pipes) from PLX files.""" + """Integration tests for loading concepts only (no pipes) from MTHDS files.""" def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], str]): - """Test loading concepts from a single PLX file.""" + """Test loading concepts from a single MTHDS file.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain" @@ -33,10 +33,10 @@ def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(plx_file_path=mthds_path) assert isinstance(result, LoadConceptsOnlyResult) assert len(result.blueprints) == 1 @@ -46,7 +46,7 @@ def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], s def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], str]): """Test that pipes are skipped when loading concepts only.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with pipe" @@ -65,10 +65,10 @@ def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], s """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(plx_file_path=mthds_path) # Concepts should be loaded assert len(result.concepts) == 1 @@ -82,9 +82,9 @@ def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], s assert len(library.pipe_library.root) == 0 def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[], str]): - """Test loading concepts from a directory with multiple PLX files.""" + """Test loading concepts from a directory with multiple MTHDS files.""" load_empty_library() - plx_content_1 = """ + mthds_content_1 = """ domain = "crm" description = "CRM domain" @@ -95,7 +95,7 @@ def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[] name = { type = "text", description = "Customer name" } """ - plx_content_2 = """ + mthds_content_2 = """ domain = "accounting" description = "Accounting domain" @@ -107,8 +107,8 @@ def test_load_concepts_only_from_directory(self, 
load_empty_library: Callable[[] """ with tempfile.TemporaryDirectory() as tmp_dir: - (Path(tmp_dir) / "crm.plx").write_text(plx_content_1, encoding="utf-8") - (Path(tmp_dir) / "accounting.plx").write_text(plx_content_2, encoding="utf-8") + (Path(tmp_dir) / "crm.mthds").write_text(mthds_content_1, encoding="utf-8") + (Path(tmp_dir) / "accounting.mthds").write_text(mthds_content_2, encoding="utf-8") result = load_concepts_only_from_directory(directory=Path(tmp_dir)) @@ -122,7 +122,7 @@ def test_load_concepts_only_from_directory(self, load_empty_library: Callable[[] def test_load_concepts_only_with_concept_references(self, load_empty_library: Callable[[], str]): """Test loading concepts that reference other concepts.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with concept references" @@ -141,10 +141,10 @@ def test_load_concepts_only_with_concept_references(self, load_empty_library: Ca """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(plx_file_path=mthds_path) assert len(result.concepts) == 2 @@ -161,7 +161,7 @@ def test_load_concepts_only_with_concept_references(self, load_empty_library: Ca def test_load_concepts_only_detects_cycles(self, load_empty_library: Callable[[], str]): """Test that cycle detection still works when loading concepts only.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with cycles" @@ -179,17 +179,17 @@ def test_load_concepts_only_detects_cycles(self, load_empty_library: Callable[[] """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") with pytest.raises(Exception, match=r"[Cc]ycle"): - load_concepts_only(plx_file_path=plx_path) + load_concepts_only(plx_file_path=mthds_path) def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable[[], str]): """Test loading concepts with library dependencies.""" load_empty_library() - # Library PLX with shared concepts - library_plx = """ + # Library MTHDS with shared concepts + library_mthds = """ domain = "shared" description = "Shared library" @@ -201,8 +201,8 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable city = { type = "text", description = "City" } """ - # Main PLX that references the library concept - main_plx = """ + # Main MTHDS that references the library concept + main_mthds = """ domain = "main" description = "Main domain" @@ -215,12 +215,12 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable """ with tempfile.TemporaryDirectory() as lib_dir, tempfile.TemporaryDirectory() as main_dir: - (Path(lib_dir) / "shared.plx").write_text(library_plx, encoding="utf-8") - main_plx_path = Path(main_dir) / "main.plx" - main_plx_path.write_text(main_plx, encoding="utf-8") + (Path(lib_dir) / "shared.mthds").write_text(library_mthds, encoding="utf-8") + main_mthds_path = Path(main_dir) / "main.mthds" + main_mthds_path.write_text(main_mthds, encoding="utf-8") result = load_concepts_only( - plx_file_path=main_plx_path, + plx_file_path=main_mthds_path, 
library_dirs=[Path(lib_dir)], ) @@ -238,10 +238,10 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable assert address is not None assert customer is not None - def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[[], str]): - """Test loading concepts from PLX content string.""" + def test_load_concepts_only_with_mthds_content(self, load_empty_library: Callable[[], str]): + """Test loading concepts from MTHDS content string.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain" @@ -252,7 +252,7 @@ def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[ name = { type = "text", description = "Item name" } """ - result = load_concepts_only(plx_content=plx_content) + result = load_concepts_only(plx_content=mthds_content) assert len(result.blueprints) == 1 assert len(result.concepts) == 1 @@ -261,7 +261,7 @@ def test_load_concepts_only_with_plx_content(self, load_empty_library: Callable[ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], str]): """Test loading concepts with refines relationships.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with refines" @@ -277,10 +277,10 @@ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], """ with tempfile.TemporaryDirectory() as tmp_dir: - plx_path = Path(tmp_dir) / "test.plx" - plx_path.write_text(plx_content, encoding="utf-8") + mthds_path = Path(tmp_dir) / "test.mthds" + mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=plx_path) + result = load_concepts_only(plx_file_path=mthds_path) assert len(result.concepts) == 2 @@ -291,7 +291,7 @@ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], def test_load_concepts_only_directory_skips_pipes(self, load_empty_library: Callable[[], str]): """Test that pipes are skipped when loading from directory.""" load_empty_library() - plx_content = """ + mthds_content = """ domain = "testapp" description = "Test domain with pipe" @@ -310,7 +310,7 @@ def test_load_concepts_only_directory_skips_pipes(self, load_empty_library: Call """ with tempfile.TemporaryDirectory() as tmp_dir: - (Path(tmp_dir) / "test.plx").write_text(plx_content, encoding="utf-8") + (Path(tmp_dir) / "test.mthds").write_text(mthds_content, encoding="utf-8") result = load_concepts_only_from_directory(directory=Path(tmp_dir)) diff --git a/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.plx b/tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.plx rename to tests/integration/pipelex/pipes/controller/pipe_batch/uppercase_transformer.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_1.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.mthds similarity index 100% rename from 
tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_2.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_complex.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/pipe_condition_continue_output_type.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.plx b/tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.plx rename to tests/integration/pipelex/pipes/controller/pipe_condition/text_length_condition.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.plx b/tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.plx rename to tests/integration/pipelex/pipes/controller/pipe_parallel/parallel_text_analysis.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.plx b/tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_parallel/pipe_parallel_1.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/capitalize_text.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/discord_newsletter.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_1.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.mthds similarity index 100% rename from 
tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_2.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.plx b/tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.mthds similarity index 100% rename from tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.plx rename to tests/integration/pipelex/pipes/controller/pipe_sequence/pipe_sequence_3.mthds diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py index 6010579e2..baa62a205 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py @@ -21,7 +21,7 @@ class TestData: """Test data for pipe_sequence list output bug.""" - PLX_BUNDLE: ClassVar[str] = """ + MTHDS_BUNDLE: ClassVar[str] = """ domain = "test_list_output" description = "Test bundle for list output bug" @@ -85,13 +85,13 @@ async def test_pipe_llm_list_output_produces_list_content_in_sequence(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_BUNDLE) # Load the bundle result = await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -116,13 +116,13 @@ async def test_standalone_pipe_llm_with_list_output(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_BUNDLE) # Load the bundle await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -143,7 +143,7 @@ async def test_standalone_pipe_llm_with_list_output(self): class TestDataNested: """Test data for nested pipe_sequence list output bug.""" - PLX_BUNDLE: ClassVar[str] = """ + MTHDS_BUNDLE: ClassVar[str] = """ domain = "test_nested_list_output" description = "Test bundle for nested list output bug" @@ -251,13 +251,13 @@ async def test_nested_sequence_with_list_output_and_batch_over(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestDataNested.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestDataNested.MTHDS_BUNDLE) # Load the bundle result = await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -278,13 +278,13 @@ async def test_inner_sequence_directly(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestDataNested.PLX_BUNDLE) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestDataNested.MTHDS_BUNDLE) # Load the bundle await validate_bundle( - 
plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.plx b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.mthds similarity index 100% rename from tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.plx rename to tests/integration/pipelex/pipes/operator/pipe_compose_structured/compose_structured_models.mthds diff --git a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py index 4c9f86b02..bc30af6d9 100644 --- a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py +++ b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py @@ -20,8 +20,8 @@ class TestData: """Test data for pipe_func validation error tests.""" @staticmethod - def make_plx_content(function_name: str) -> str: - """Generate PLX content for testing a specific function.""" + def make_mthds_content(function_name: str) -> str: + """Generate MTHDS content for testing a specific function.""" return f""" domain = "test_pipe_func_validation" description = "Test bundle for pipe_func validation error reporting" @@ -33,7 +33,7 @@ def make_plx_content(function_name: str) -> str: output = "Text" """ - PLX_CONTENT_WITH_PIPE_FUNC: ClassVar[str] = """ + MTHDS_CONTENT_WITH_PIPE_FUNC: ClassVar[str] = """ domain = "test_pipe_func_validation" description = "Test bundle for pipe_func validation error reporting" @@ -195,9 +195,9 @@ async def test_pipe_func_missing_return_type_reports_clear_error(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Create the .py file with the function (missing return type) py_file = temp_path / "my_funcs.py" @@ -207,7 +207,7 @@ async def test_pipe_func_missing_return_type_reports_clear_error(self): # Currently raises LibraryError, but ValidateBundleError is also acceptable with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -242,9 +242,9 @@ async def test_pipe_func_with_return_type_validates_successfully(self): with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Create the .py file with the function (WITH return type) py_file = temp_path / "my_funcs.py" @@ -252,7 +252,7 @@ async def test_pipe_func_with_return_type_validates_successfully(self): # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -277,14 +277,14 @@ async def test_pipe_func_decorated_but_ineligible_not_silently_ignored(self): py_file = temp_path / "my_funcs.py" py_file.write_text(TestData.FUNC_WITH_DECORATOR_NO_RETURN_TYPE) - # Create .plx file that 
references the function - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.PLX_CONTENT_WITH_PIPE_FUNC) + # Create .mthds file that references the function + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.MTHDS_CONTENT_WITH_PIPE_FUNC) # Try to validate - should fail with informative error with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -328,9 +328,9 @@ async def test_ineligible_function_returns_correct_error( with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file referencing the function - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(TestData.make_plx_content(function_name)) + # Create the .mthds file referencing the function + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(TestData.make_mthds_content(function_name)) # Create the .py file with the ineligible function py_file = temp_path / "my_funcs.py" @@ -339,7 +339,7 @@ async def test_ineligible_function_returns_correct_error( # Validate the bundle - should fail with a specific error message with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -380,8 +380,8 @@ class MyStructuredContent(StructuredContent): async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructuredContent: return MyStructuredContent(name="test") """ - # PLX file that expects Text output (which uses TextContent) - plx_content = """ + # MTHDS file that expects Text output (which uses TextContent) + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for pipe_func return type validation" @@ -394,9 +394,9 @@ async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructu with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -405,7 +405,7 @@ async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructu # Validate the bundle - should fail because return type doesn't match concept's structure class with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -438,8 +438,8 @@ async def test_pipe_func_list_content_with_array_output_validates_successfully(s async def func_returns_list_content(working_memory: WorkingMemory) -> ListContent[TextContent]: return ListContent(items=[TextContent(text="test1"), TextContent(text="test2")]) """ - # PLX file with array output notation using built-in Text concept - plx_content = """ + # MTHDS file with array output notation using built-in Text concept + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent validation" @@ -452,9 +452,9 @@ async def func_returns_list_content(working_memory: WorkingMemory) -> ListConten with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - 
plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -462,7 +462,7 @@ async def func_returns_list_content(working_memory: WorkingMemory) -> ListConten # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -490,8 +490,8 @@ class WrongItem(StructuredContent): async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> ListContent[WrongItem]: return ListContent(items=[WrongItem(different_field=42)]) """ - # PLX file expects Text[] (TextContent) but function returns ListContent[WrongItem] - plx_content = """ + # MTHDS file expects Text[] (TextContent) but function returns ListContent[WrongItem] + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent validation error" @@ -504,9 +504,9 @@ async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> List with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -515,7 +515,7 @@ async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> List # Validate the bundle - should fail with clear error about item type mismatch with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) @@ -548,8 +548,8 @@ async def test_pipe_func_array_output_requires_list_content_return_type(self): async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> TextContent: return TextContent(text="single item - should be a list!") """ - # PLX file expects Text[] (array) but function returns single TextContent - plx_content = """ + # MTHDS file expects Text[] (array) but function returns single TextContent + mthds_content = """ domain = "test_pipe_func_validation" description = "Test bundle for ListContent requirement" @@ -562,9 +562,9 @@ async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> with tempfile.TemporaryDirectory() as temp_dir: temp_path = Path(temp_dir) - # Create the .plx file - plx_file = temp_path / "test_bundle.plx" - plx_file.write_text(plx_content) + # Create the .mthds file + mthds_file = temp_path / "test_bundle.mthds" + mthds_file.write_text(mthds_content) # Create the .py file with the function py_file = temp_path / "my_funcs.py" @@ -573,7 +573,7 @@ async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> # Validate the bundle - should fail because return type is not ListContent with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=plx_file, + plx_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.plx b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.mthds similarity index 100% rename from tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.plx rename to 
tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_basic.mthds diff --git a/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.plx b/tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.mthds similarity index 100% rename from tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.plx rename to tests/integration/pipelex/pipes/operator/pipe_llm/test_structures_complex.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.plx b/tests/integration/pipelex/pipes/pipelines/crazy_image_generation.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/crazy_image_generation.plx rename to tests/integration/pipelex/pipes/pipelines/crazy_image_generation.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/failing_pipelines.plx b/tests/integration/pipelex/pipes/pipelines/failing_pipelines.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/failing_pipelines.plx rename to tests/integration/pipelex/pipes/pipelines/failing_pipelines.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/flows.plx b/tests/integration/pipelex/pipes/pipelines/flows.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/flows.plx rename to tests/integration/pipelex/pipes/pipelines/flows.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.plx b/tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.plx rename to tests/integration/pipelex/pipes/pipelines/multiple_images_input_to_llm.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/multiplicity.plx b/tests/integration/pipelex/pipes/pipelines/multiplicity.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/multiplicity.plx rename to tests/integration/pipelex/pipes/pipelines/multiplicity.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/refined_concepts.plx b/tests/integration/pipelex/pipes/pipelines/refined_concepts.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/refined_concepts.plx rename to tests/integration/pipelex/pipes/pipelines/refined_concepts.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/test_image_inputs.plx b/tests/integration/pipelex/pipes/pipelines/test_image_inputs.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/test_image_inputs.plx rename to tests/integration/pipelex/pipes/pipelines/test_image_inputs.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx b/tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/test_image_out_in.plx rename to tests/integration/pipelex/pipes/pipelines/test_image_out_in.mthds diff --git a/tests/integration/pipelex/pipes/pipelines/tests.plx b/tests/integration/pipelex/pipes/pipelines/tests.mthds similarity index 100% rename from tests/integration/pipelex/pipes/pipelines/tests.plx rename to tests/integration/pipelex/pipes/pipelines/tests.mthds diff --git a/tests/unit/pipelex/cli/test_agent_graph_cmd.py b/tests/unit/pipelex/cli/test_agent_graph_cmd.py index a60d2171e..e864f669f 100644 --- a/tests/unit/pipelex/cli/test_agent_graph_cmd.py +++ b/tests/unit/pipelex/cli/test_agent_graph_cmd.py @@ -20,7 +20,7 @@ class TestGraphCmd: 
- """Tests for the graph command that generates HTML from a .plx bundle.""" + """Tests for the graph command that generates HTML from a .mthds bundle.""" def _mock_blueprint(self, mocker: MockerFixture, *, main_pipe: str = "my_pipe") -> None: """Mock bundle parsing to return a blueprint with the given main_pipe.""" @@ -59,20 +59,20 @@ def _mock_execution(self, mocker: MockerFixture, *, graph_spec_present: bool = T return_value={"reactflow_html": Path("graph/reactflow.html")}, ) - def test_valid_plx_file_produces_success_json( + def test_valid_mthds_file_produces_success_json( self, mocker: MockerFixture, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """Valid .plx file should produce success JSON with pipe_code and output_dir.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + """Valid .mthds file should produce success JSON with pipe_code and output_dir.""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) self._mock_execution(mocker) - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) parsed = json.loads(capsys.readouterr().out) assert parsed["success"] is True @@ -80,14 +80,14 @@ def test_valid_plx_file_produces_success_json( assert "output_dir" in parsed assert "files" in parsed - def test_valid_plx_file_calls_asyncio_run_twice( + def test_valid_mthds_file_calls_asyncio_run_twice( self, mocker: MockerFixture, tmp_path: Path, ) -> None: - """Valid .plx file should call asyncio.run twice (execute_pipeline + generate_graph_outputs).""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + """Valid .mthds file should call asyncio.run twice (execute_pipeline + generate_graph_outputs).""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) @@ -111,16 +111,16 @@ def test_valid_plx_file_calls_asyncio_run_twice( return_value={"reactflow_html": Path("graph/reactflow.html")}, ) - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert mock_asyncio_run.call_count == 2 - def test_non_plx_file_produces_error( + def test_non_mthds_file_produces_error( self, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """Non-PLX file (e.g. .json, .txt) should produce an ArgumentError.""" + """Non-MTHDS file (e.g. 
.json, .txt) should produce an ArgumentError.""" json_file = tmp_path / "graphspec.json" json_file.write_text("{}") @@ -131,7 +131,7 @@ def test_non_plx_file_produces_error( parsed = json.loads(capsys.readouterr().err) assert parsed["error"] is True assert parsed["error_type"] == "ArgumentError" - assert ".plx" in parsed["message"] + assert ".mthds" in parsed["message"] def test_file_not_found_produces_error( self, @@ -139,7 +139,7 @@ def test_file_not_found_produces_error( tmp_path: Path, ) -> None: """Missing file should produce a FileNotFoundError.""" - missing = tmp_path / "nonexistent.plx" + missing = tmp_path / "nonexistent.mthds" with pytest.raises(typer.Exit) as exc_info: graph_cmd(target=str(missing)) @@ -156,8 +156,8 @@ def test_bundle_without_main_pipe_produces_error( tmp_path: Path, ) -> None: """Bundle that doesn't declare main_pipe should produce a BundleError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[domain]\ncode = "test"') mock_blueprint = mocker.MagicMock() mock_blueprint.main_pipe = None @@ -167,7 +167,7 @@ def test_bundle_without_main_pipe_produces_error( ) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) @@ -182,8 +182,8 @@ def test_no_graph_spec_produces_error( tmp_path: Path, ) -> None: """If pipe_output.graph_spec is None, should produce a GraphSpecMissingError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) @@ -199,7 +199,7 @@ def test_no_graph_spec_produces_error( mocker.patch(f"{GRAPH_CMD_MODULE}.asyncio.run", return_value=mock_pipe_output) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) @@ -222,13 +222,13 @@ def test_format_option_produces_success( format_option: GraphFormat, ) -> None: """Each format option should produce success JSON.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text('[bundle]\nmain_pipe = "my_pipe"\n[domain]\ncode = "test"') self._mock_blueprint(mocker) self._mock_execution(mocker) - graph_cmd(target=str(plx_file), graph_format=format_option) + graph_cmd(target=str(mthds_file), graph_format=format_option) parsed = json.loads(capsys.readouterr().out) assert parsed["success"] is True @@ -239,15 +239,15 @@ def test_default_format_is_reactflow(self) -> None: default = sig.parameters["graph_format"].default assert default == GraphFormat.REACTFLOW - def test_plx_parse_error_produces_error( + def test_mthds_parse_error_produces_error( self, mocker: MockerFixture, capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """PLX parse error should produce a PLXDecodeError.""" - plx_file = tmp_path / "bundle.plx" - plx_file.write_text("invalid toml {{{{") + """MTHDS parse error should produce a PLXDecodeError.""" + mthds_file = tmp_path / "bundle.mthds" + mthds_file.write_text("invalid toml {{{{") mocker.patch( f"{GRAPH_CMD_MODULE}.PipelexInterpreter.make_pipelex_bundle_blueprint", @@ 
-255,7 +255,7 @@ def test_plx_parse_error_produces_error( ) with pytest.raises(typer.Exit) as exc_info: - graph_cmd(target=str(plx_file)) + graph_cmd(target=str(mthds_file)) assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) diff --git a/tests/unit/pipelex/core/interpreter/test_interpreter.py b/tests/unit/pipelex/core/interpreter/test_interpreter.py index a351aa42d..f5dde26ec 100644 --- a/tests/unit/pipelex/core/interpreter/test_interpreter.py +++ b/tests/unit/pipelex/core/interpreter/test_interpreter.py @@ -7,18 +7,18 @@ class TestPipelexInterpreter: - @pytest.mark.parametrize(("test_name", "plx_content", "expected_blueprint"), InterpreterTestCases.VALID_TEST_CASES) - def test_make_pipelex_bundle_blueprint(self, test_name: str, plx_content: str, expected_blueprint: PipelexBundleBlueprint): - """Test making blueprint from various valid PLX content.""" - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + @pytest.mark.parametrize(("test_name", "mthds_content", "expected_blueprint"), InterpreterTestCases.VALID_TEST_CASES) + def test_make_pipelex_bundle_blueprint(self, test_name: str, mthds_content: str, expected_blueprint: PipelexBundleBlueprint): + """Test making blueprint from various valid MTHDS content.""" + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=mthds_content) pretty_print(blueprint, title=f"Blueprint {test_name}") pretty_print(expected_blueprint, title=f"Expected blueprint {test_name}") assert blueprint == expected_blueprint - @pytest.mark.parametrize(("test_name", "invalid_plx_content", "expected_exception"), InterpreterTestCases.ERROR_TEST_CASES) - def test_invalid_plx_should_raise_exception(self, test_name: str, invalid_plx_content: str, expected_exception: type[Exception]): - """Test that invalid PLX content raises appropriate exceptions.""" - log.verbose(f"Testing invalid PLX content: {test_name}") + @pytest.mark.parametrize(("test_name", "invalid_mthds_content", "expected_exception"), InterpreterTestCases.ERROR_TEST_CASES) + def test_invalid_mthds_should_raise_exception(self, test_name: str, invalid_mthds_content: str, expected_exception: type[Exception]): + """Test that invalid MTHDS content raises appropriate exceptions.""" + log.verbose(f"Testing invalid MTHDS content: {test_name}") with pytest.raises(expected_exception): - PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=invalid_plx_content) + PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=invalid_mthds_content) diff --git a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py b/tests/unit/pipelex/core/test_data/errors/invalid_plx.py index a1571ec44..5017ac2ec 100644 --- a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py +++ b/tests/unit/pipelex/core/test_data/errors/invalid_plx.py @@ -1,9 +1,9 @@ from pipelex.core.interpreter.interpreter import PipelexInterpreterError, PLXDecodeError -INVALID_PLX_SYNTAX = ( - "invalid_plx_syntax", +INVALID_MTHDS_SYNTAX = ( + "invalid_mthds_syntax", """domain = "test_domain" -description = "Domain with invalid PLX syntax" +description = "Domain with invalid MTHDS syntax" [concept] InvalidConcept = "This is missing a closing quote""", @@ -200,8 +200,8 @@ # Export all error test cases ERROR_TEST_CASES: list[tuple[str, str, type[Exception] | tuple[type[Exception], ...]]] = [ - # PLX Syntax Errors - INVALID_PLX_SYNTAX, + # MTHDS Syntax Errors + INVALID_MTHDS_SYNTAX, MALFORMED_SECTION, UNCLOSED_STRING, DUPLICATE_KEYS, diff --git 
a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py index 663094fea..db5ca3dae 100644 --- a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py +++ b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py @@ -19,10 +19,10 @@ class InterpreterTestCases: - """Test cases for PipelexInterpreter with PLX content and expected blueprints.""" + """Test cases for PipelexInterpreter with MTHDS content and expected blueprints.""" # Aggregate all valid test cases from organized modules - VALID_TEST_CASES: ClassVar[list[tuple[str, str, PipelexBundleBlueprint]]] = [ # test_name,plx_content,blueprint + VALID_TEST_CASES: ClassVar[list[tuple[str, str, PipelexBundleBlueprint]]] = [ # test_name,mthds_content,blueprint # Domain tests *DOMAIN_TEST_CASES, # Concept tests diff --git a/tests/unit/pipelex/language/test_plx_factory.py b/tests/unit/pipelex/language/test_mthds_factory.py similarity index 65% rename from tests/unit/pipelex/language/test_plx_factory.py rename to tests/unit/pipelex/language/test_mthds_factory.py index fcdac3eda..7988c13f2 100644 --- a/tests/unit/pipelex/language/test_plx_factory.py +++ b/tests/unit/pipelex/language/test_mthds_factory.py @@ -5,32 +5,32 @@ from pytest_mock import MockerFixture from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.language.plx_config import PlxConfig, PlxConfigForConcepts, PlxConfigForPipes, PlxConfigInlineTables, PlxConfigStrings -from pipelex.language.plx_factory import PIPE_CATEGORY_FIELD_KEY, PlxFactory +from pipelex.language.mthds_config import MthdsConfig, MthdsConfigForConcepts, MthdsConfigForPipes, MthdsConfigInlineTables, MthdsConfigStrings +from pipelex.language.mthds_factory import PIPE_CATEGORY_FIELD_KEY, MthdsFactory from pipelex.pipe_operators.compose.pipe_compose_blueprint import PipeComposeBlueprint -class TestPlxFactoryUnit: - """Unit tests for PlxFactory methods.""" +class TestMthdsFactoryUnit: + """Unit tests for MthdsFactory methods.""" @pytest.fixture - def mock_plx_config(self) -> PlxConfig: - """Create a mock PLX configuration for testing.""" - return PlxConfig( - strings=PlxConfigStrings( + def mock_mthds_config(self) -> MthdsConfig: + """Create a mock MTHDS configuration for testing.""" + return MthdsConfig( + strings=MthdsConfigStrings( prefer_literal=True, force_multiline=False, length_limit_to_multiline=50, ensure_trailing_newline=True, ensure_leading_blank_line=False, ), - inline_tables=PlxConfigInlineTables( + inline_tables=MthdsConfigInlineTables( spaces_inside_curly_braces=True, ), - concepts=PlxConfigForConcepts( + concepts=MthdsConfigForConcepts( structure_field_ordering=["type", "description", "inputs", "output"], ), - pipes=PlxConfigForPipes( + pipes=MthdsConfigForPipes( field_ordering=["type", "description", "inputs", "output"], ), ) @@ -51,85 +51,85 @@ def sample_mapping_data(self) -> dict[str, Any]: ], } - def test_format_tomlkit_string_simple(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_simple(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test formatting simple strings.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test simple string - result = PlxFactory.format_tomlkit_string("simple text") + result = MthdsFactory.format_tomlkit_string("simple text") assert isinstance(result, 
tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # The actual string value without quotes assert result.value == "simple text" - def test_format_tomlkit_string_multiline(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_multiline(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test formatting multiline strings.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test string with newlines multiline_text = "line1\nline2\nline3" - result = PlxFactory.format_tomlkit_string(multiline_text) + result = MthdsFactory.format_tomlkit_string(multiline_text) assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline with trailing newline assert result.value == "line1\nline2\nline3\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_force_multiline(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_force_multiline(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test force multiline configuration.""" - mock_plx_config.strings.force_multiline = True - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.force_multiline = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.format_tomlkit_string("short") + result = MthdsFactory.format_tomlkit_string("short") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline even for short text assert result.value == "short\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_length_limit(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_length_limit(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test length limit for multiline conversion.""" - mock_plx_config.strings.length_limit_to_multiline = 10 - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.length_limit_to_multiline = 10 + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) long_text = "this is a very long text that exceeds the limit" - result = PlxFactory.format_tomlkit_string(long_text) + result = MthdsFactory.format_tomlkit_string(long_text) assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should be multiline due to length assert result.value == "this is a very long text that exceeds the limit\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_format_tomlkit_string_leading_blank_line(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_format_tomlkit_string_leading_blank_line(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test leading blank line configuration.""" - mock_plx_config.strings.ensure_leading_blank_line = True - mock_plx_config.strings.force_multiline = True - 
_mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + mock_mthds_config.strings.ensure_leading_blank_line = True + mock_mthds_config.strings.force_multiline = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.format_tomlkit_string("content") + result = MthdsFactory.format_tomlkit_string("content") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Should have leading blank line assert result.value == "\ncontent\n" # Check if it's a multiline string by checking if it has newlines in the value assert "\n" in result.value - def test_convert_dicts_to_inline_tables_simple_dict(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_simple_dict(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting simple dictionary to inline table.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"key1": "value1", "key2": "value2"} - result = PlxFactory.convert_dicts_to_inline_tables(input_dict) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert result["key1"].value == "value1" assert result["key2"].value == "value2" - def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting dictionary with field ordering preserves all fields.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"key2": "value2", "key1": "value1", "key3": "value3"} field_ordering = ["key1", "key3"] - result = PlxFactory.convert_dicts_to_inline_tables(input_dict, field_ordering) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict, field_ordering) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # All input keys must be present in the result @@ -173,15 +173,15 @@ def test_convert_dicts_to_inline_tables_with_field_ordering(self, mocker: Mocker def test_convert_dicts_to_inline_tables_with_field_ordering_preserves_all_fields( self, mocker: MockerFixture, - mock_plx_config: PlxConfig, + mock_mthds_config: MthdsConfig, topic: str, input_dict: dict[str, Any], field_ordering: list[str], ): """Test that all input fields are preserved in the output regardless of field_ordering.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.convert_dicts_to_inline_tables(input_dict, field_ordering or None) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict, field_ordering or None) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] result_keys = set(result.keys()) @@ -195,23 +195,23 @@ def 
test_convert_dicts_to_inline_tables_with_field_ordering_preserves_all_fields else: assert result_value == expected_value, f"[{topic}] Value mismatch for key '{key}'" - def test_convert_dicts_to_inline_tables_nested_dict(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_nested_dict(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting nested dictionary.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_dict = {"outer": {"inner": "value"}} - result = PlxFactory.convert_dicts_to_inline_tables(input_dict) + result = MthdsFactory.convert_dicts_to_inline_tables(input_dict) assert isinstance(result, tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert isinstance(result["outer"], tomlkit.items.InlineTable) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert result["outer"]["inner"].value == "value" - def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting list containing dictionaries.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) input_list = [{"name": "first", "value": 1}, {"name": "second", "value": 2}] - result = PlxFactory.convert_dicts_to_inline_tables(input_list) + result = MthdsFactory.convert_dicts_to_inline_tables(input_list) assert isinstance(result, tomlkit.items.Array) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert len(result) == 2 @@ -219,23 +219,23 @@ def test_convert_dicts_to_inline_tables_list_with_dicts(self, mocker: MockerFixt assert result[0]["name"].value == "first" assert result[0]["value"] == 1 - def test_convert_dicts_to_inline_tables_string_handling(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_dicts_to_inline_tables_string_handling(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test string handling in conversion.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) # Test simple string - result = PlxFactory.convert_dicts_to_inline_tables("simple string") + result = MthdsFactory.convert_dicts_to_inline_tables("simple string") assert isinstance(result, tomlkit.items.String) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Test other types pass through - assert PlxFactory.convert_dicts_to_inline_tables(42) == 42 - assert PlxFactory.convert_dicts_to_inline_tables(True) is True + assert MthdsFactory.convert_dicts_to_inline_tables(42) == 42 + assert MthdsFactory.convert_dicts_to_inline_tables(True) is True - def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_plx_config: PlxConfig, sample_mapping_data: dict[str, Any]): + def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig, sample_mapping_data: dict[str, Any]): """Test converting mapping to table.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", 
return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) - result = PlxFactory.convert_mapping_to_table(sample_mapping_data) + result = MthdsFactory.convert_mapping_to_table(sample_mapping_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "simple_field" in result @@ -243,14 +243,14 @@ def test_convert_mapping_to_table(self, mocker: MockerFixture, mock_plx_config: assert "list_field" in result assert "complex_list" in result - def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test converting mapping with field ordering.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) mapping = {"field3": "value3", "field1": "value1", "field2": "value2"} field_ordering = ["field1", "field2"] - result = PlxFactory.convert_mapping_to_table(mapping, field_ordering) + result = MthdsFactory.convert_mapping_to_table(mapping, field_ordering) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] # Check ordering (note: tomlkit preserves insertion order) @@ -259,12 +259,12 @@ def test_convert_mapping_to_table_with_field_ordering(self, mocker: MockerFixtur assert keys[1] == "field2" assert keys[2] == "field3" - def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test that category field is skipped.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) mapping = {"field1": "value1", PIPE_CATEGORY_FIELD_KEY: "should_be_skipped", "field2": "value2"} - result = PlxFactory.convert_mapping_to_table(mapping) + result = MthdsFactory.convert_mapping_to_table(mapping) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "field1" in result @@ -274,31 +274,31 @@ def test_convert_mapping_to_table_skips_category(self, mocker: MockerFixture, mo def test_add_spaces_to_inline_tables_simple(self): """Test adding spaces to simple inline tables.""" input_toml = "{key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" def test_add_spaces_to_inline_tables_already_spaced(self): """Test that already spaced tables are preserved.""" input_toml = "{ key = value }" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" def test_add_spaces_to_inline_tables_nested(self): """Test adding spaces to nested inline tables.""" input_toml = "{outer = {inner = value}}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ outer = { inner = value } }" def test_add_spaces_to_inline_tables_with_jinja2(self): """Test that Jinja2 
templates are preserved.""" input_toml = "template = '{{ variable }}' and {key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "template = '{{ variable }}' and { key = value }" def test_add_spaces_to_inline_tables_complex(self): """Test complex inline table spacing.""" input_toml = "config = {db = {host = 'localhost', port = 5432}, cache = {enabled = true}}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) expected = "config = { db = { host = 'localhost', port = 5432 }, cache = { enabled = true } }" assert result == expected @@ -306,17 +306,17 @@ def test_add_spaces_to_inline_tables_partial_spacing(self): """Test partial spacing scenarios.""" # Left space only input_toml = "{ key = value}" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" # Right space only input_toml = "{key = value }" - result = PlxFactory.add_spaces_to_inline_tables(input_toml) + result = MthdsFactory.add_spaces_to_inline_tables(input_toml) assert result == "{ key = value }" - def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for pipe section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) pipe_data = { "type": "PipeLLM", @@ -326,7 +326,7 @@ def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: P "nested_config": {"param1": "value1", "param2": 42}, } - result = PlxFactory.make_table_obj_for_pipe(pipe_data) + result = MthdsFactory.make_table_obj_for_pipe(pipe_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "type" in result @@ -335,25 +335,25 @@ def test_make_table_obj_for_pipe(self, mocker: MockerFixture, mock_plx_config: P assert "output" in result assert "nested_config" in result - def test_make_table_obj_for_concept_simple_string(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_simple_string(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for concept with simple string definition.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"SimpleConcept": "A simple concept definition"} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "SimpleConcept" in result assert result["SimpleConcept"] == "A simple concept definition" - def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test making table object for concept with structure.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", 
return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"ComplexConcept": {"description": "A complex concept", "structure": {"field1": "string", "field2": "int"}}} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert "ComplexConcept" in result @@ -361,22 +361,22 @@ def test_make_table_obj_for_concept_with_structure(self, mocker: MockerFixture, assert "description" in result["ComplexConcept"] assert "structure" in result["ComplexConcept"] - def test_make_table_obj_for_concept_structure_string(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_structure_string(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test concept with structure as string.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = {"ConceptWithStringStructure": {"structure": "SomeClass"}} - result = PlxFactory.make_table_obj_for_concept(concept_data) + result = MthdsFactory.make_table_obj_for_concept(concept_data) assert isinstance(result, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] concept_table = result["ConceptWithStringStructure"] assert isinstance(concept_table, tomlkit.items.Table) # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType] assert concept_table["structure"] == "SomeClass" - def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test error handling for invalid structure types.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = { "InvalidConcept": { @@ -385,49 +385,49 @@ def test_make_table_obj_for_concept_invalid_structure(self, mocker: MockerFixtur } with pytest.raises(TypeError, match="Structure field value is not a mapping"): - PlxFactory.make_table_obj_for_concept(concept_data) + MthdsFactory.make_table_obj_for_concept(concept_data) - def test_make_table_obj_for_concept_invalid_concept_value(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_make_table_obj_for_concept_invalid_concept_value(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test error handling for invalid concept value types.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) concept_data = { "InvalidConcept": 123 # Invalid type } with pytest.raises(TypeError, match="Concept field value is not a mapping"): - PlxFactory.make_table_obj_for_concept(concept_data) + MthdsFactory.make_table_obj_for_concept(concept_data) - def test_dict_to_plx_styled_toml_with_spacing(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with spacing enabled.""" - mock_plx_config.inline_tables.spaces_inside_curly_braces = True - _mock_config = 
mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) - mock_add_spaces = mocker.patch.object(PlxFactory, "add_spaces_to_inline_tables", return_value="spaced_output") + def test_dict_to_mthds_styled_toml_with_spacing(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with spacing enabled.""" + mock_mthds_config.inline_tables.spaces_inside_curly_braces = True + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) + mock_add_spaces = mocker.patch.object(MthdsFactory, "add_spaces_to_inline_tables", return_value="spaced_output") data = {"domain": "test", "description": "test domain"} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) assert result == "spaced_output" mock_add_spaces.assert_called_once() - def test_dict_to_plx_styled_toml_without_spacing(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML without spacing.""" - mock_plx_config.inline_tables.spaces_inside_curly_braces = False - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) - mock_add_spaces = mocker.patch.object(PlxFactory, "add_spaces_to_inline_tables") + def test_dict_to_mthds_styled_toml_without_spacing(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML without spacing.""" + mock_mthds_config.inline_tables.spaces_inside_curly_braces = False + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) + mock_add_spaces = mocker.patch.object(MthdsFactory, "add_spaces_to_inline_tables") data = {"domain": "test", "description": "test domain"} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) # Should not call add_spaces_to_inline_tables mock_add_spaces.assert_not_called() assert isinstance(result, str) - def test_dict_to_plx_styled_toml_empty_sections(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_dict_to_mthds_styled_toml_empty_sections(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test handling of empty sections.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data: dict[str, Any] = { "domain": "test", @@ -435,41 +435,41 @@ def test_dict_to_plx_styled_toml_empty_sections(self, mocker: MockerFixture, moc "pipe": {}, # Empty pipe section } - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) # Empty sections should be skipped assert "concept" not in result assert "pipe" not in result assert "domain" in result - def test_dict_to_plx_styled_toml_with_pipe_section(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with pipe section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_dict_to_mthds_styled_toml_with_pipe_section(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with pipe section.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data = {"domain": "test", "pipe": {"test_pipe": {"type": "PipeLLM", "description": "Test pipe"}}} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = 
MthdsFactory.dict_to_mthds_styled_toml(data) assert "domain" in result assert "[pipe.test_pipe]" in result assert "type" in result assert "description" in result - def test_dict_to_plx_styled_toml_with_concept_section(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test dict to PLX styled TOML with concept section.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_dict_to_mthds_styled_toml_with_concept_section(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test dict to MTHDS styled TOML with concept section.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) data = {"domain": "test", "concept": {"TestConcept": "A test concept"}} - result = PlxFactory.dict_to_plx_styled_toml(data) + result = MthdsFactory.dict_to_mthds_styled_toml(data) assert "domain" in result assert "[concept]" in result assert "TestConcept" in result - def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture, mock_plx_config: PlxConfig): - """Test PipeComposeBlueprint construct serializes to correct PLX format.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): + """Test PipeComposeBlueprint construct serializes to correct MTHDS format.""" + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) blueprint = PipelexBundleBlueprint( domain="test_domain", @@ -488,22 +488,22 @@ def test_pipe_compose_construct_serialization_format(self, mocker: MockerFixture }, ) - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) # Should have nested table section, not inline - assert "[pipe.compose_test.construct]" in plx_content + assert "[pipe.compose_test.construct]" in mthds_content # Should use concise format { from = '...' 
} - assert "value = { from = 'data.field' }" in plx_content - assert "name = { from = 'info.name' }" in plx_content + assert "value = { from = 'data.field' }" in mthds_content + assert "name = { from = 'info.name' }" in mthds_content # Should NOT have internal field names - assert "construct_blueprint" not in plx_content - assert "fields" not in plx_content - assert "from_path" not in plx_content - assert "method" not in plx_content + assert "construct_blueprint" not in mthds_content + assert "fields" not in mthds_content + assert "from_path" not in mthds_content + assert "method" not in mthds_content - def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: MockerFixture, mock_plx_config: PlxConfig): + def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: MockerFixture, mock_mthds_config: MthdsConfig): """Test PipeComposeBlueprint construct with FIXED and TEMPLATE methods serializes correctly.""" - _mock_config = mocker.patch.object(PlxFactory, "_plx_config", return_value=mock_plx_config) + _mock_config = mocker.patch.object(MthdsFactory, "_mthds_config", return_value=mock_mthds_config) blueprint = PipelexBundleBlueprint( domain="test_domain", @@ -524,19 +524,19 @@ def test_pipe_compose_construct_fixed_and_template_serialization(self, mocker: M }, ) - plx_content = PlxFactory.make_plx_content(blueprint=blueprint) + mthds_content = MthdsFactory.make_mthds_content(blueprint=blueprint) # Should have nested table section - assert "[pipe.compose_mixed.construct]" in plx_content + assert "[pipe.compose_mixed.construct]" in mthds_content # Fixed values should appear directly - assert "fixed_string = 'hello world'" in plx_content - assert "fixed_number = 42" in plx_content + assert "fixed_string = 'hello world'" in mthds_content + assert "fixed_number = 42" in mthds_content # From var should use { from = '...' } - assert "from_var = { from = 'data.value' }" in plx_content + assert "from_var = { from = 'data.value' }" in mthds_content # Template should use { template = '...' } - assert "templated = { template = 'Hello {{ data.name }}!' }" in plx_content + assert "templated = { template = 'Hello {{ data.name }}!' 
}" in mthds_content # Should NOT have internal field names (as key names in construct) - assert "fixed_value" not in plx_content - assert "from_path" not in plx_content + assert "fixed_value" not in mthds_content + assert "from_path" not in mthds_content # Check that 'method' does not appear as a key in construct section - assert "method =" not in plx_content + assert "method =" not in mthds_content diff --git a/tests/unit/pipelex/tools/test.plx b/tests/unit/pipelex/tools/test.mthds similarity index 100% rename from tests/unit/pipelex/tools/test.plx rename to tests/unit/pipelex/tools/test.mthds From 31f8425909f686f78a7376ef0bcea631c1f6eccf Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 11 Feb 2026 19:13:37 +0100 Subject: [PATCH 2/5] Rename remaining PLX identifiers to MTHDS and update doc filenames MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Rename doc files: kick-off-a-pipelex-workflow-project.md → kick-off-a-methods-project.md, write-workflows-manually.md → write-methods-manually.md, configure-ai-llm-to-optimize-workflows.md → configure-ai-llm-to-optimize-methods.md - Rename plx_content → mthds_content and plx_file_path → mthds_file_path across all pipeline, CLI, client, builder, and test files - Rename PLXDecodeError → MthdsDecodeError class and all references - Rename to_plx_dict() → to_mthds_dict() method and all call sites - Rename invalid_plx.py → invalid_mthds.py test data file - Update all remaining PLX/plx references in comments, docstrings, error messages, test identifiers, and template files - Update mkdocs.yml nav paths and site_description - Update all cross-references to renamed doc files Co-Authored-By: Claude Opus 4.6 --- README.md | 2 +- docs/home/2-get-started/pipe-builder.md | 6 +-- ...-manually.md => write-methods-manually.md} | 4 +- docs/home/5-setup/configure-ai-providers.md | 2 +- docs/home/5-setup/project-organization.md | 4 +- ...> configure-ai-llm-to-optimize-methods.md} | 0 .../6-build-reliable-ai-workflows/domain.md | 2 +- ...oject.md => kick-off-a-methods-project.md} | 0 .../libraries.md | 12 ++--- .../pipe-builder.md | 2 +- .../pipelex-bundle-specification.md | 2 +- .../pipes/executing-pipelines.md | 6 +-- .../config-technical/library-config.md | 8 ++-- mkdocs.yml | 10 ++--- pipelex/builder/CLAUDE.md | 2 +- pipelex/builder/builder_loop.py | 8 ++-- pipelex/builder/runner_code.py | 2 +- .../cli/agent_cli/commands/agent_output.py | 4 +- pipelex/cli/agent_cli/commands/graph_cmd.py | 12 ++--- pipelex/cli/agent_cli/commands/inputs_cmd.py | 2 +- pipelex/cli/agent_cli/commands/run_cmd.py | 20 ++++----- .../cli/agent_cli/commands/validate_cmd.py | 4 +- pipelex/cli/commands/build/inputs_cmd.py | 2 +- pipelex/cli/commands/build/output_cmd.py | 2 +- pipelex/cli/commands/build/pipe_cmd.py | 2 +- pipelex/cli/commands/build/runner_cmd.py | 2 +- pipelex/cli/commands/build/structures_cmd.py | 2 +- pipelex/cli/commands/run_cmd.py | 12 ++--- pipelex/cli/commands/validate_cmd.py | 2 +- pipelex/client/client.py | 20 ++++----- pipelex/client/pipeline_request_factory.py | 10 ++--- pipelex/client/protocol.py | 18 ++++---- pipelex/core/concepts/concept_factory.py | 2 +- .../structure_generation/generator.py | 2 +- pipelex/core/interpreter/exceptions.py | 4 +- pipelex/core/interpreter/interpreter.py | 16 +++---- .../reactflow/templates/_styles.css.jinja2 | 2 +- pipelex/language/mthds_factory.py | 2 +- pipelex/libraries/library_manager_abstract.py | 4 +- pipelex/libraries/pipe/pipe_library.py | 2 +- 
.../compose/construct_blueprint.py | 22 +++++----- .../compose/pipe_compose_blueprint.py | 2 +- .../pipe_operators/extract/pipe_extract.py | 2 +- pipelex/pipeline/execute.py | 28 ++++++------ pipelex/pipeline/pipeline_run_setup.py | 26 +++++------ pipelex/pipeline/start.py | 26 +++++------ pipelex/pipeline/validate_bundle.py | 44 +++++++++---------- .../nested_concepts_test__customer.py | 2 +- .../nested_concepts_test__invoice.py | 2 +- .../nested_concepts_test__line_item.py | 2 +- .../test_nested_concepts_pipe.py | 4 +- .../test_structure_generator_cli.py | 2 +- .../pipe_compose/cv_job_matching_analysis.py | 2 +- .../cv_job_matching_itvw_question.py | 2 +- .../cv_job_matching_itvw_sheet.py | 2 +- .../cv_job_matching_job_requirements.py | 2 +- .../cv_job_matching_match_analysis.py | 2 +- .../builder/test_builder_mthds_validation.py | 6 +-- .../test_out_of_order_refines.py | 2 +- .../pipeline/test_load_concepts_only.py | 14 +++--- .../pipe_batch/test_pipe_batch_simple.py | 2 +- .../test_pipe_condition_simple.py | 2 +- .../test_pipe_parallel_simple.py | 2 +- .../test_pipe_sequence_list_output_bug.py | 8 ++-- .../test_pipe_sequence_simple.py | 2 +- .../test_image_inputs_inference.py | 4 +- .../test_pipe_compose_structured.py | 2 +- .../test_pipe_func_validation_errors.py | 16 +++---- .../unit/pipelex/cli/test_agent_graph_cmd.py | 8 ++-- .../test_structure_generator.py | 42 +++++++++--------- .../test_structure_generator_concept_refs.py | 2 +- .../test_structure_generator_escaping.py | 28 ++++++------ .../core/interpreter/test_interpreter.py | 4 +- .../{invalid_plx.py => invalid_mthds.py} | 18 ++++---- .../core/test_data/interpreter_test_cases.py | 2 +- .../pipe_compose/test_construct_blueprint.py | 2 +- .../tools/test_jinja2_required_variables.py | 16 +++---- 77 files changed, 287 insertions(+), 287 deletions(-) rename docs/home/2-get-started/{write-workflows-manually.md => write-methods-manually.md} (98%) rename docs/home/6-build-reliable-ai-workflows/{configure-ai-llm-to-optimize-workflows.md => configure-ai-llm-to-optimize-methods.md} (100%) rename docs/home/6-build-reliable-ai-workflows/{kick-off-a-pipelex-workflow-project.md => kick-off-a-methods-project.md} (100%) rename tests/unit/pipelex/core/test_data/errors/{invalid_plx.py => invalid_mthds.py} (94%) diff --git a/README.md b/README.md index e80faecb5..c72ad64d4 100644 --- a/README.md +++ b/README.md @@ -332,7 +332,7 @@ Each pipe processes information using **Concepts** (typing with meaning) to ensu **Learn More:** - [Design and Run Pipelines](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples -- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-method-project/) - Deep dive into Pipelex +- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-methods-project/) - Deep dive into Pipelex - [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) - Set up AI providers and models ## 🔧 IDE Extension diff --git a/docs/home/2-get-started/pipe-builder.md b/docs/home/2-get-started/pipe-builder.md index 7bb7f90f7..aa3b092d1 100644 --- a/docs/home/2-get-started/pipe-builder.md +++ b/docs/home/2-get-started/pipe-builder.md @@ -33,7 +33,7 @@ The pipe builder generates three files in a numbered directory (e.g., `results/p 3. **`run_{pipe_code}.py`** - Ready-to-run Python script that you can customize and execute !!! 
tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the methods yourself, following our [documentation guide](./write-workflows-manually.md). + For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key, otherwise, you can also create the methods yourself, following our [documentation guide](./write-methods-manually.md). !!! info "Learn More" Want to understand how the Pipe Builder works under the hood? See [Pipe Builder Deep Dive](../9-tools/pipe-builder.md) for the full explanation of its multi-step generation process. @@ -90,8 +90,8 @@ Now that you know how to generate methods with the Pipe Builder, explore these r **Learn how to Write Methods yourself** -- [:material-pencil: Write Methods Manually](./write-workflows-manually.md){ .md-button .md-button--primary } -- [:material-book-open-variant: Build Reliable AI Methods](../6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md){ .md-button .md-button--primary } +- [:material-pencil: Write Methods Manually](./write-methods-manually.md){ .md-button .md-button--primary } +- [:material-book-open-variant: Build Reliable AI Methods](../6-build-reliable-ai-workflows/kick-off-a-methods-project.md){ .md-button .md-button--primary } **Explore Examples:** diff --git a/docs/home/2-get-started/write-workflows-manually.md b/docs/home/2-get-started/write-methods-manually.md similarity index 98% rename from docs/home/2-get-started/write-workflows-manually.md rename to docs/home/2-get-started/write-methods-manually.md index d4c083e90..0bef7f8fc 100644 --- a/docs/home/2-get-started/write-workflows-manually.md +++ b/docs/home/2-get-started/write-methods-manually.md @@ -325,12 +325,12 @@ Now that you understand the basics, explore more: **Learn More about the PipeLLM:** -- [LLM Configuration: play with the models](../../home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md) - Optimize cost and quality +- [LLM Configuration: play with the models](../../home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md) - Optimize cost and quality - [Full configuration of the PipeLLM](../../home/6-build-reliable-ai-workflows/pipes/pipe-operators/PipeLLM.md) **Learn more about Pipelex (domains, project structure, best practices...)** -- [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) - Deep dive into pipeline design +- [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md) - Deep dive into pipeline design - [Cookbook Examples](../../home/4-cookbook-examples/index.md) - Real-world examples and patterns **Learn More about the other pipes** diff --git a/docs/home/5-setup/configure-ai-providers.md b/docs/home/5-setup/configure-ai-providers.md index cb894e52f..8fa346266 100644 --- a/docs/home/5-setup/configure-ai-providers.md +++ b/docs/home/5-setup/configure-ai-providers.md @@ -176,7 +176,7 @@ Now that you have your backend configured: 2. **Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md) 3. 
**Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) +4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md) !!! tip "Advanced Configuration" For detailed backend configuration options, see [Inference Backend Configuration](../../home/7-configuration/config-technical/inference-backend-config.md). diff --git a/docs/home/5-setup/project-organization.md b/docs/home/5-setup/project-organization.md index da08468d6..c1c2f95b2 100644 --- a/docs/home/5-setup/project-organization.md +++ b/docs/home/5-setup/project-organization.md @@ -35,7 +35,7 @@ your_project/ └── pipelex.toml ``` -Learn more in our [Project Structure documentation](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md). +Learn more in our [Project Structure documentation](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md). --- @@ -54,5 +54,5 @@ Now that you understand project organization: 2. **Learn the concepts**: [Writing Methods Tutorial](../../home/2-get-started/pipe-builder.md) 3. **Explore examples**: [Cookbook Repository](https://github.com/Pipelex/pipelex-cookbook/tree/feature/Chicago) -4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md) +4. **Deep dive**: [Build Reliable AI Methods](../../home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md) diff --git a/docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md b/docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md similarity index 100% rename from docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md rename to docs/home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md diff --git a/docs/home/6-build-reliable-ai-workflows/domain.md b/docs/home/6-build-reliable-ai-workflows/domain.md index 6d79b0cd2..09733cca8 100644 --- a/docs/home/6-build-reliable-ai-workflows/domain.md +++ b/docs/home/6-build-reliable-ai-workflows/domain.md @@ -171,6 +171,6 @@ Individual pipes can override the domain system prompt by defining their own `sy ## Related Documentation - [Pipelex Bundle Specification](./pipelex-bundle-specification.md) - How domains are declared in bundles -- [Kick off a Pipelex Method Project](./kick-off-a-pipelex-workflow-project.md) - Getting started +- [Kick off a Pipelex Method Project](./kick-off-a-methods-project.md) - Getting started - [Define Your Concepts](./concepts/define_your_concepts.md) - Creating concepts within domains - [Designing Pipelines](./pipes/index.md) - Building pipes within domains diff --git a/docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md b/docs/home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md similarity index 100% rename from docs/home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md rename to docs/home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md diff --git a/docs/home/6-build-reliable-ai-workflows/libraries.md b/docs/home/6-build-reliable-ai-workflows/libraries.md index f072a0ce9..794d30d26 100644 --- a/docs/home/6-build-reliable-ai-workflows/libraries.md +++ b/docs/home/6-build-reliable-ai-workflows/libraries.md @@ -41,7 +41,7 @@ Libraries enforce specific uniqueness constraints to 
maintain consistency: Currently, all libraries are **local**, meaning they are loaded from: - Directories on your filesystem (using `library_dirs` parameter) -- MTHDS content strings (using `plx_content` parameter) +- MTHDS content strings (using `mthds_content` parameter) - The current working directory (default behavior) ```python @@ -102,7 +102,7 @@ pipe_output = await execute_pipeline( ```python # Loads only the provided MTHDS content -plx_content = """ +mthds_content = """ domain = "marketing" [concept] @@ -116,7 +116,7 @@ prompt = "Generate a tagline for: @desc" """ pipe_output = await execute_pipeline( - plx_content=plx_content, + mthds_content=mthds_content, pipe_code="my_pipe", inputs={...}, ) @@ -167,14 +167,14 @@ pipe_output = await execute_pipeline( ### 2. Use MTHDS Content for Dynamic Pipelines -When generating or modifying pipelines dynamically, use `plx_content`: +When generating or modifying pipelines dynamically, use `mthds_content`: ```python # Generate MTHDS content dynamically -plx_content = generate_custom_pipeline(user_requirements) +mthds_content = generate_custom_pipeline(user_requirements) pipe_output = await execute_pipeline( - plx_content=plx_content, + mthds_content=mthds_content, inputs={...}, ) ``` diff --git a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md index 65c9e1263..543dfb072 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipe-builder.md +++ b/docs/home/6-build-reliable-ai-workflows/pipe-builder.md @@ -3,7 +3,7 @@ Pipelex provides powerful tools to automatically generate complete, working pipelines from natural language requirements. This feature leverages AI to translate your ideas into fully functional pipeline code, dramatically speeding up development. !!! tip "Pipe Builder Requirements" - For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the methods yourself, following our [documentation guide](./kick-off-a-pipelex-workflow-project.md). + For now, the pipe builder requires access to **Claude 4.5 Sonnet**, either through Pipelex Inference, or using your own key through Anthropic, Amazon Bedrock or BlackboxAI. Don't hesitate to join our [Discord](https://go.pipelex.com/discord) to get a key or see [Configure AI Providers](../../home/5-setup/configure-ai-providers.md) for details. Otherwise, you can also create the methods yourself, following our [documentation guide](./kick-off-a-methods-project.md). ## Overview diff --git a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md index 66ee5643e..f36b33d15 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md +++ b/docs/home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md @@ -242,5 +242,5 @@ prompt = "..." 
- [Understanding Domains](./domain.md) - Deep dive into domain organization - [Designing Pipelines](./pipes/index.md) - Learn how to design and compose pipes - [Define Your Concepts](./concepts/define_your_concepts.md) - Complete guide to concept definitions -- [Kick off a Pipelex Method Project](./kick-off-a-pipelex-workflow-project.md) - Start a new project +- [Kick off a Pipelex Method Project](./kick-off-a-methods-project.md) - Start a new project diff --git a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md index ec1e07c96..f99223eca 100644 --- a/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md +++ b/docs/home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md @@ -78,7 +78,7 @@ When using `execute_pipeline` or `start_pipeline`, you can control library behav - **`library_dirs`**: A list of directory paths to load pipe definitions from. **These directories must contain both your `.mthds` files AND any Python files defining `StructuredContent` classes** (e.g., `*_struct.py` files). If not specified, Pipelex falls back to the `PIPELEXPATH` environment variable, then to the current working directory. -- **`plx_content`**: When provided, Pipelex will load only this MTHDS content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. +- **`mthds_content`**: When provided, Pipelex will load only this MTHDS content into the library, bypassing directory scanning. This is useful for dynamic pipeline execution without file-based definitions. !!! info "Python Structure Classes" If your concepts use Python `StructuredContent` classes instead of inline structures, those Python files must be in the directories specified by `library_dirs`. Pipelex auto-discovers and registers these classes during library loading. Learn more about [Python StructuredContent Classes](../concepts/python-classes.md). @@ -182,7 +182,7 @@ Generate a catchy tagline based on the above description. The tagline should be Pipelex.make() pipe_output = await execute_pipeline( - plx_content=my_pipe_content, + mthds_content=my_pipe_content, inputs={ "description": { "concept": "ProductDescription", @@ -193,7 +193,7 @@ pipe_output = await execute_pipeline( ``` !!! note "Pipe Code Resolution" - When using `plx_content`: + When using `mthds_content`: - If the content has a `main_pipe` property and you don't provide `pipe_code`, the `main_pipe` is executed - If you provide `pipe_code`, it overrides `main_pipe` diff --git a/docs/home/7-configuration/config-technical/library-config.md b/docs/home/7-configuration/config-technical/library-config.md index 38204d747..7ecb9586b 100644 --- a/docs/home/7-configuration/config-technical/library-config.md +++ b/docs/home/7-configuration/config-technical/library-config.md @@ -68,7 +68,7 @@ Pipelex resolves library directories using this priority order (highest to lowes | **3 (Fallback)** | `PIPELEXPATH` environment variable | System-wide or shell session default | !!! info "Empty List is Valid" - Passing an empty list `[]` to `library_dirs` is a valid explicit value that **disables** directory-based library loading. This is useful when using `plx_content` directly without needing files from the filesystem. + Passing an empty list `[]` to `library_dirs` is a valid explicit value that **disables** directory-based library loading. 
This is useful when using `mthds_content` directly without needing files from the filesystem. ### Using the PIPELEXPATH Environment Variable @@ -176,9 +176,9 @@ output2 = await execute_pipeline( inputs={"input": "value"}, ) -# Disable directory loading (use only plx_content) +# Disable directory loading (use only mthds_content) output3 = await execute_pipeline( - plx_content=my_plx_string, + mthds_content=my_mthds_string, library_dirs=[], # Empty list disables directory-based loading inputs={"input": "value"}, ) @@ -237,7 +237,7 @@ output = await execute_pipeline( 3. **Use per-call `library_dirs` for exceptions**: Override only when a specific execution needs different directories. -4. **Use empty list `[]` for isolated execution**: When you want to execute only from `plx_content` without loading any file-based definitions. +4. **Use empty list `[]` for isolated execution**: When you want to execute only from `mthds_content` without loading any file-based definitions. 5. **Include structure class directories**: Remember that `library_dirs` must contain both `.mthds` files AND Python files defining `StructuredContent` classes. diff --git a/mkdocs.yml b/mkdocs.yml index d557c323d..c1b33f9b8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,6 +1,6 @@ site_name: Pipelex Documentation site_url: https://docs.pipelex.com/ -site_description: "Official documentation for Pipelex, a framework and language for building deterministic, repeatable AI workflows and knowledge pipelines." +site_description: "Official documentation for Pipelex, an open-source library/CLI for building and running deterministic, repeatable AI methods." docs_dir: docs repo_url: "https://github.com/Pipelex/pipelex" repo_name: "Pipelex on GitHub" @@ -95,7 +95,7 @@ nav: - v0.18.0 "Chicago": home/1-releases/chicago.md - Get Started: - The Pipe Builder: home/2-get-started/pipe-builder.md - - Write Workflows Manually: home/2-get-started/write-workflows-manually.md + - Write Methods Manually: home/2-get-started/write-methods-manually.md - Understand Pipelex: - The Know-How Graph: home/3-understand-pipelex/viewpoint.md - The Pipelex Paradigm: home/3-understand-pipelex/pipelex-paradigm/index.md @@ -120,8 +120,8 @@ nav: - Gateway Available Models: home/5-setup/gateway-models.md - Project Organization: home/5-setup/project-organization.md - Telemetry: home/5-setup/telemetry.md - - Build Reliable AI Workflows: - - Kick off a Pipeline Project: home/6-build-reliable-ai-workflows/kick-off-a-pipelex-workflow-project.md + - Build Reliable AI Methods: + - Kick off a Method Project: home/6-build-reliable-ai-workflows/kick-off-a-methods-project.md - Pipe Builder: home/6-build-reliable-ai-workflows/pipe-builder.md - Pipelex Bundle Specification: home/6-build-reliable-ai-workflows/pipelex-bundle-specification.md - Domain: home/6-build-reliable-ai-workflows/domain.md @@ -152,7 +152,7 @@ nav: - PipeParallel: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeParallel.md - PipeBatch: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeBatch.md - PipeCondition: home/6-build-reliable-ai-workflows/pipes/pipe-controllers/PipeCondition.md - - Optimize Cost & Quality: home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-workflows.md + - Optimize Cost & Quality: home/6-build-reliable-ai-workflows/configure-ai-llm-to-optimize-methods.md - LLM Structured Generation: home/6-build-reliable-ai-workflows/llm-structured-generation-config.md - LLM Prompting Style: 
home/6-build-reliable-ai-workflows/adapt-to-llm-prompting-style-openai-anthropic-mistral.md - Configuration (TOML reference): diff --git a/pipelex/builder/CLAUDE.md b/pipelex/builder/CLAUDE.md index b7c115f95..2e59176f1 100644 --- a/pipelex/builder/CLAUDE.md +++ b/pipelex/builder/CLAUDE.md @@ -91,7 +91,7 @@ The `build` command in `pipelex/cli/agent_cli/commands/build_cmd.py` calls `buil 1. Runs a "builder pipe" (itself a Pipelex pipeline) that generates a `PipelexBundleSpec` 2. Passes it to `BuilderLoop.build_and_fix()` -3. Converts the result to MTHDS via `MthdsFactory.make_plx_content()` +3. Converts the result to MTHDS via `MthdsFactory.make_mthds_content()` 4. Saves to `pipelex-wip/` with incremental naming ## Talent System diff --git a/pipelex/builder/builder_loop.py b/pipelex/builder/builder_loop.py index b7e5d2a3f..12854dd9e 100644 --- a/pipelex/builder/builder_loop.py +++ b/pipelex/builder/builder_loop.py @@ -69,13 +69,13 @@ async def build_and_fix( if is_save_first_iteration_enabled: try: - plx_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) first_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_1st_iteration", extension="mthds", ) - save_text_to_path(text=plx_content, path=str(first_iteration_path), create_directory=True) + save_text_to_path(text=mthds_content, path=str(first_iteration_path), create_directory=True) except PipelexBundleSpecBlueprintError as exc: log.warning(f"Could not save first iteration MTHDS: {exc}") @@ -693,13 +693,13 @@ def _fix_bundle_validation_error( # Save second iteration if we made any changes (pipes or concepts) if (fixed_pipes or added_concepts) and is_save_second_iteration_enabled: try: - plx_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) + mthds_content = MthdsFactory.make_mthds_content(blueprint=pipelex_bundle_spec.to_blueprint()) second_iteration_path = get_incremental_file_path( base_path=output_dir or "results/pipe-builder", base_name="generated_pipeline_2nd_iteration", extension="mthds", ) - save_text_to_path(text=plx_content, path=str(second_iteration_path)) + save_text_to_path(text=mthds_content, path=str(second_iteration_path)) except PipelexBundleSpecBlueprintError as exc: log.warning(f"Could not save second iteration MTHDS: {exc}") diff --git a/pipelex/builder/runner_code.py b/pipelex/builder/runner_code.py index 2e27fbfe9..9d891b154 100644 --- a/pipelex/builder/runner_code.py +++ b/pipelex/builder/runner_code.py @@ -165,7 +165,7 @@ def generate_runner_code(pipe: PipeAbstract, output_multiplicity: bool = False, Args: pipe: The pipe to generate runner code for output_multiplicity: Whether the output is a list (e.g., Text[]) - library_dir: Directory containing the PLX bundles to load + library_dir: Directory containing the MTHDS bundles to load """ # Get output information structure_class_name = pipe.output.concept.structure_class_name diff --git a/pipelex/cli/agent_cli/commands/agent_output.py b/pipelex/cli/agent_cli/commands/agent_output.py index 1d17c2182..8cbddf0ca 100644 --- a/pipelex/cli/agent_cli/commands/agent_output.py +++ b/pipelex/cli/agent_cli/commands/agent_output.py @@ -28,7 +28,7 @@ "JSONDecodeError": "Verify the JSON input is valid (check for trailing commas, unquoted keys, etc.)", # Interpreter errors "PipelexInterpreterError": "Check MTHDS file TOML syntax and ensure all 
referenced concepts and pipes are defined", - "PLXDecodeError": "The MTHDS file has TOML syntax errors; validate TOML syntax before retrying", + "MthdsDecodeError": "The MTHDS file has TOML syntax errors; validate TOML syntax before retrying", # Configuration/initialization errors "TelemetryConfigValidationError": "Run 'pipelex init telemetry' to create a valid telemetry configuration", "GatewayTermsNotAcceptedError": "Run 'pipelex init config' to accept gateway terms, or disable pipelex_gateway in backends.toml", @@ -65,7 +65,7 @@ "JSONDecodeError": "input", "JsonTypeError": "input", "ArgumentError": "input", - "PLXDecodeError": "input", + "MthdsDecodeError": "input", "PipelexInterpreterError": "input", "ValidationError": "input", "ValueError": "input", diff --git a/pipelex/cli/agent_cli/commands/graph_cmd.py b/pipelex/cli/agent_cli/commands/graph_cmd.py index 4eb342447..9a16a11e5 100644 --- a/pipelex/cli/agent_cli/commands/graph_cmd.py +++ b/pipelex/cli/agent_cli/commands/graph_cmd.py @@ -9,7 +9,7 @@ from pipelex.cli.agent_cli.commands.agent_cli_factory import make_pipelex_for_agent_cli from pipelex.cli.agent_cli.commands.agent_output import agent_error, agent_success from pipelex.config import get_config -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.helpers import is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError @@ -64,10 +64,10 @@ def graph_cmd( if not is_pipelex_file(input_path): agent_error(f"Expected a .mthds bundle file, got: {input_path.name}", "ArgumentError") - # Read PLX content and extract main pipe + # Read MTHDS content and extract main pipe try: - plx_content = input_path.read_text(encoding="utf-8") - bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + mthds_content = input_path.read_text(encoding="utf-8") + bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) main_pipe_code = bundle_blueprint.main_pipe if not main_pipe_code: agent_error( @@ -77,7 +77,7 @@ def graph_cmd( pipe_code: str = main_pipe_code except (OSError, UnicodeDecodeError) as exc: agent_error(f"Failed to read bundle file '{target}': {exc}", type(exc).__name__, cause=exc) - except (PipelexInterpreterError, PLXDecodeError) as exc: + except (PipelexInterpreterError, MthdsDecodeError) as exc: agent_error(f"Failed to parse bundle '{target}': {exc}", type(exc).__name__, cause=exc) # Initialize Pipelex @@ -93,7 +93,7 @@ def graph_cmd( pipe_output = asyncio.run( execute_pipeline( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=target, pipe_run_mode=PipeRunMode.DRY, execution_config=execution_config, diff --git a/pipelex/cli/agent_cli/commands/inputs_cmd.py b/pipelex/cli/agent_cli/commands/inputs_cmd.py index 992846a57..51ee144b2 100644 --- a/pipelex/cli/agent_cli/commands/inputs_cmd.py +++ b/pipelex/cli/agent_cli/commands/inputs_cmd.py @@ -44,7 +44,7 @@ async def _inputs_core( NoInputsRequiredError: If the pipe has no inputs. 
""" if bundle_path: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: main_pipe_code = bundle_blueprint.main_pipe diff --git a/pipelex/cli/agent_cli/commands/run_cmd.py b/pipelex/cli/agent_cli/commands/run_cmd.py index 1abafd7e1..8720b710d 100644 --- a/pipelex/cli/agent_cli/commands/run_cmd.py +++ b/pipelex/cli/agent_cli/commands/run_cmd.py @@ -10,7 +10,7 @@ from pipelex.cli.agent_cli.commands.agent_cli_factory import make_pipelex_for_agent_cli from pipelex.cli.agent_cli.commands.agent_output import agent_error, agent_success from pipelex.config import get_config -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.helpers import is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError @@ -25,7 +25,7 @@ async def _run_pipeline_core( pipe_code: str, - plx_content: str | None = None, + mthds_content: str | None = None, bundle_uri: str | None = None, inputs: dict[str, Any] | None = None, dry_run: bool = False, @@ -37,7 +37,7 @@ async def _run_pipeline_core( Args: pipe_code: The pipe code to run. - plx_content: PLX content string (optional). + mthds_content: MTHDS content string (optional). bundle_uri: Bundle file path (optional). inputs: Input dictionary for the pipeline. dry_run: Whether to run in dry mode (no actual inference). @@ -60,7 +60,7 @@ async def _run_pipeline_core( pipe_output = await execute_pipeline( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=bundle_uri, inputs=inputs, pipe_run_mode=pipe_run_mode, @@ -203,13 +203,13 @@ def run_cmd( if not pipe_code and not bundle_path: agent_error("No pipe code or bundle file specified", "ArgumentError") - # Load plx content from bundle if provided - plx_content: str | None = None + # Load MTHDS content from bundle if provided + mthds_content: str | None = None if bundle_path: try: - plx_content = Path(bundle_path).read_text(encoding="utf-8") + mthds_content = Path(bundle_path).read_text(encoding="utf-8") if not pipe_code: - bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) main_pipe_code = bundle_blueprint.main_pipe if not main_pipe_code: agent_error( @@ -221,7 +221,7 @@ def run_cmd( agent_error(f"Bundle file not found: {bundle_path}", "FileNotFoundError", cause=exc) except (OSError, UnicodeDecodeError) as exc: agent_error(f"Failed to read bundle file '{bundle_path}': {exc}", type(exc).__name__, cause=exc) - except (PipelexInterpreterError, PLXDecodeError) as exc: + except (PipelexInterpreterError, MthdsDecodeError) as exc: agent_error(f"Failed to parse bundle '{bundle_path}': {exc}", type(exc).__name__, cause=exc) # Load inputs if provided @@ -246,7 +246,7 @@ def run_cmd( result = asyncio.run( _run_pipeline_core( pipe_code=pipe_code, # type: ignore[arg-type] - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=bundle_path, inputs=pipeline_inputs, dry_run=dry_run, diff --git a/pipelex/cli/agent_cli/commands/validate_cmd.py 
b/pipelex/cli/agent_cli/commands/validate_cmd.py index 120a76a51..b9b5c3af1 100644 --- a/pipelex/cli/agent_cli/commands/validate_cmd.py +++ b/pipelex/cli/agent_cli/commands/validate_cmd.py @@ -76,7 +76,7 @@ async def _validate_bundle_core( Raises: ValidateBundleError: If validation fails. """ - result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) validated_pipes = [{"pipe_code": the_pipe.code, "status": "SUCCESS"} for the_pipe in result.pipes] @@ -145,7 +145,7 @@ async def _validate_pipe_in_bundle_core( """ # Validate the bundle to load all its pipes into the library # This ensures all dependencies are available - await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) # Now get the specific pipe and dry-run only that one the_pipe = get_required_pipe(pipe_code=pipe_code) diff --git a/pipelex/cli/commands/build/inputs_cmd.py b/pipelex/cli/commands/build/inputs_cmd.py index b23ed22d9..8bfe589c0 100644 --- a/pipelex/cli/commands/build/inputs_cmd.py +++ b/pipelex/cli/commands/build/inputs_cmd.py @@ -46,7 +46,7 @@ async def _generate_inputs_core( """ if bundle_path: try: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: # No pipe code specified, use main_pipe from bundle diff --git a/pipelex/cli/commands/build/output_cmd.py b/pipelex/cli/commands/build/output_cmd.py index d4c6abf98..f834f2659 100644 --- a/pipelex/cli/commands/build/output_cmd.py +++ b/pipelex/cli/commands/build/output_cmd.py @@ -48,7 +48,7 @@ async def _generate_output_core( """ if bundle_path: try: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path) bundle_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: # No pipe code specified, use main_pipe from bundle diff --git a/pipelex/cli/commands/build/pipe_cmd.py b/pipelex/cli/commands/build/pipe_cmd.py index 9168dd7f4..d93bf0ad2 100644 --- a/pipelex/cli/commands/build/pipe_cmd.py +++ b/pipelex/cli/commands/build/pipe_cmd.py @@ -295,7 +295,7 @@ async def run_pipeline(): # pass empty library_dirs to avoid loading any libraries set at env var or instance level: # we don't want any other pipeline to interfere with the pipeline we just built built_pipe_output = await execute_pipeline( - plx_content=mthds_content, + mthds_content=mthds_content, pipe_run_mode=PipeRunMode.DRY, execution_config=built_pipe_execution_config, library_dirs=[], diff --git a/pipelex/cli/commands/build/runner_cmd.py b/pipelex/cli/commands/build/runner_cmd.py index 3537e7409..9e3d53956 100644 --- a/pipelex/cli/commands/build/runner_cmd.py +++ b/pipelex/cli/commands/build/runner_cmd.py @@ -49,7 +49,7 @@ async def prepare_runner( if bundle_path: try: - validate_bundle_result = await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + validate_bundle_result = await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) all_blueprints.extend(validate_bundle_result.blueprints) first_blueprint = validate_bundle_result.blueprints[0] if not pipe_code: diff --git a/pipelex/cli/commands/build/structures_cmd.py b/pipelex/cli/commands/build/structures_cmd.py index 
77ec06f55..c979864a0 100644 --- a/pipelex/cli/commands/build/structures_cmd.py +++ b/pipelex/cli/commands/build/structures_cmd.py @@ -349,7 +349,7 @@ def _build_structures_cmd(): typer.echo(f"🔍 Loading concepts from bundle: {target_path}") # Load concepts only (no pipes) - load_result = load_concepts_only(plx_file_path=target_path, library_dirs=library_dirs_paths) + load_result = load_concepts_only(mthds_file_path=target_path, library_dirs=library_dirs_paths) # THIS IS A HACK, while waiting class/func registries to be in libraries. get_class_registry().teardown() get_func_registry().teardown() diff --git a/pipelex/cli/commands/run_cmd.py b/pipelex/cli/commands/run_cmd.py index 719bbe922..89def96f0 100644 --- a/pipelex/cli/commands/run_cmd.py +++ b/pipelex/cli/commands/run_cmd.py @@ -19,7 +19,7 @@ handle_model_choice_error, ) from pipelex.config import get_config -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.helpers import MTHDS_EXTENSION, is_pipelex_file from pipelex.core.interpreter.interpreter import PipelexInterpreter from pipelex.core.pipes.exceptions import PipeOperatorModelChoiceError @@ -236,14 +236,14 @@ def run_cmd( async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = None): source_description: str - plx_content: str | None = None + mthds_content: str | None = None if bundle_path: try: - plx_content = Path(bundle_path).read_text(encoding="utf-8") + mthds_content = Path(bundle_path).read_text(encoding="utf-8") # Use lightweight parsing to extract main_pipe without full validation # Full validation happens later during execute_pipeline if not pipe_code: - bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + bundle_blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) main_pipe_code = bundle_blueprint.main_pipe if not main_pipe_code: msg = ( @@ -259,7 +259,7 @@ async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = N except FileNotFoundError as exc: typer.secho(f"Failed to load bundle '{bundle_path}': {exc}", fg=typer.colors.RED, err=True) raise typer.Exit(1) from exc - except (PipelexInterpreterError, PLXDecodeError) as exc: + except (PipelexInterpreterError, MthdsDecodeError) as exc: typer.secho(f"Failed to parse bundle '{bundle_path}': {exc}", fg=typer.colors.RED, err=True) raise typer.Exit(1) from exc elif pipe_code: @@ -301,7 +301,7 @@ async def run_pipeline(pipe_code: str | None = None, bundle_path: str | None = N try: pipe_output = await execute_pipeline( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=bundle_path, inputs=pipeline_inputs, pipe_run_mode=pipe_run_mode, diff --git a/pipelex/cli/commands/validate_cmd.py b/pipelex/cli/commands/validate_cmd.py index 263c2813b..b5ff7770c 100644 --- a/pipelex/cli/commands/validate_cmd.py +++ b/pipelex/cli/commands/validate_cmd.py @@ -187,7 +187,7 @@ async def validate_pipe( ): if bundle_path: try: - await validate_bundle(plx_file_path=bundle_path, library_dirs=library_dirs) + await validate_bundle(mthds_file_path=bundle_path, library_dirs=library_dirs) typer.secho( f"✅ Successfully validated bundle '{bundle_path}'", fg=typer.colors.GREEN, diff --git a/pipelex/client/client.py b/pipelex/client/client.py index 7277114c3..d4ca63a78 100644 --- a/pipelex/client/client.py +++ b/pipelex/client/client.py @@ -81,7 
+81,7 @@ async def _make_api_call(self, endpoint: str, request: str | None = None) -> dic async def execute_pipeline( self, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, output_multiplicity: VariableMultiplicity | None = None, @@ -91,7 +91,7 @@ async def execute_pipeline( Args: pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute + mthds_content: Content of the pipeline bundle to execute inputs: Inputs passed to the pipeline output_name: Name of the output slot to write to output_multiplicity: Output multiplicity setting @@ -100,8 +100,8 @@ async def execute_pipeline( Returns: Complete execution results including pipeline state and output """ - if not pipe_code and not plx_content: - msg = "Either pipe_code or plx_content must be provided to the API execute_pipeline." + if not pipe_code and not mthds_content: + msg = "Either pipe_code or mthds_content must be provided to the API execute_pipeline." raise PipelineRequestError(message=msg) working_memory: WorkingMemory | None = None @@ -114,7 +114,7 @@ async def execute_pipeline( pipeline_request = PipelineRequestFactory.make_from_working_memory( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, working_memory=working_memory, output_name=output_name, output_multiplicity=output_multiplicity, @@ -127,7 +127,7 @@ async def execute_pipeline( async def start_pipeline( self, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, output_multiplicity: VariableMultiplicity | None = None, @@ -137,7 +137,7 @@ async def start_pipeline( Args: pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute + mthds_content: Content of the pipeline bundle to execute inputs: Inputs passed to the pipeline output_name: Name of the output slot to write to output_multiplicity: Output multiplicity setting @@ -146,8 +146,8 @@ async def start_pipeline( Returns: Initial response with pipeline_run_id and created_at timestamp """ - if not pipe_code and not plx_content: - msg = "Either pipe_code or plx_content must be provided to the API start_pipeline." + if not pipe_code and not mthds_content: + msg = "Either pipe_code or mthds_content must be provided to the API start_pipeline." 
raise PipelineRequestError(message=msg) working_memory: WorkingMemory | None = None @@ -160,7 +160,7 @@ async def start_pipeline( pipeline_request = PipelineRequestFactory.make_from_working_memory( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, working_memory=working_memory, output_name=output_name, output_multiplicity=output_multiplicity, diff --git a/pipelex/client/pipeline_request_factory.py b/pipelex/client/pipeline_request_factory.py index 29f134944..34626a78c 100644 --- a/pipelex/client/pipeline_request_factory.py +++ b/pipelex/client/pipeline_request_factory.py @@ -12,7 +12,7 @@ class PipelineRequestFactory: @staticmethod def make_from_working_memory( pipe_code: str | None, - plx_content: str | None, + mthds_content: str | None, working_memory: WorkingMemory | None = None, output_name: str | None = None, output_multiplicity: VariableMultiplicity | None = None, @@ -22,19 +22,19 @@ def make_from_working_memory( Args: pipe_code: The code identifying the pipeline to execute - plx_content: Content of the pipeline bundle to execute + mthds_content: Content of the pipeline bundle to execute working_memory: The WorkingMemory to convert output_name: Name of the output slot to write to output_multiplicity: Output multiplicity setting dynamic_output_concept_code: Override for the dynamic output concept code - plx_content: Content of the pipeline bundle to execute + mthds_content: Content of the pipeline bundle to execute Returns: PipelineRequest with the working memory serialized to reduced format """ return PipelineRequest( pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, # `ApiSerializer.serialize_working_memory_for_api` returns a dict[str, dict[str, Any]] (plain dicts), which is a valid PipelineInputs inputs=cast("PipelineInputs", ApiSerializer.serialize_working_memory_for_api(working_memory=working_memory)), output_name=output_name, @@ -55,7 +55,7 @@ def make_from_body(request_body: dict[str, Any]) -> PipelineRequest: """ return PipelineRequest( pipe_code=request_body.get("pipe_code"), - plx_content=request_body.get("plx_content"), + mthds_content=request_body.get("mthds_content"), inputs=request_body.get("inputs", {}), output_name=request_body.get("output_name"), output_multiplicity=request_body.get("output_multiplicity"), diff --git a/pipelex/client/protocol.py b/pipelex/client/protocol.py index 6eeb93f2b..4bcd98d73 100644 --- a/pipelex/client/protocol.py +++ b/pipelex/client/protocol.py @@ -48,7 +48,7 @@ class PipelineRequest(BaseModel): Attributes: pipe_code (str | None): Code of the pipe to execute - plx_content (str | None): Content of the pipeline bundle to execute + mthds_content (str | None): Content of the pipeline bundle to execute inputs (PipelineInputs | None): Inputs in PipelineInputs format - Pydantic validation is skipped to preserve the flexible format (dicts, strings, StuffContent objects, etc.) 
output_name (str | None): Name of the output slot to write to @@ -58,7 +58,7 @@ class PipelineRequest(BaseModel): """ pipe_code: str | None = None - plx_content: str | None = None + mthds_content: str | None = None inputs: Annotated[PipelineInputs | None, SkipValidation] = None output_name: str | None = None output_multiplicity: VariableMultiplicity | None = None @@ -67,11 +67,11 @@ @model_validator(mode="before") @classmethod def validate_request(cls, values: dict[str, Any]): - if values.get("pipe_code") is None and values.get("plx_content") is None: + if values.get("pipe_code") is None and values.get("mthds_content") is None: msg = ( - "pipe_code and plx_content cannot be None together. Its either: Both of them, or if there is no plx_content, " + "pipe_code and mthds_content cannot be None together. It's either both of them, or, if there is no mthds_content, " "then pipe_code must be provided and must reference a pipe already registered in the library." - "If plx_content is provided but no pipe_code, plx_content must have a main_pipe property." + "If mthds_content is provided but no pipe_code, mthds_content must have a main_pipe property." ) raise PipelineRequestError(msg) return values @@ -129,7 +129,7 @@ class PipelexProtocol(Protocol): async def execute_pipeline( self, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, output_multiplicity: VariableMultiplicity | None = None, @@ -139,7 +139,7 @@ async def execute_pipeline( Args: pipe_code (str): The code identifying the pipeline to execute - plx_content (str | None): Content of the pipeline bundle to execute + mthds_content (str | None): Content of the pipeline bundle to execute inputs (PipelineInputs | WorkingMemory | None): Inputs passed to the pipeline output_name (str | None): Target output slot name output_multiplicity (PipeOutputMultiplicity | None): Output multiplicity setting @@ -158,7 +158,7 @@ async def execute_pipeline( async def start_pipeline( self, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, output_multiplicity: VariableMultiplicity | None = None, @@ -168,7 +168,7 @@ async def start_pipeline( Args: pipe_code (str): The code identifying the pipeline to execute - plx_content (str | None): Content of the pipeline bundle to execute + mthds_content (str | None): Content of the pipeline bundle to execute inputs (PipelineInputs | WorkingMemory | None): Inputs passed to the pipeline output_name (str | None): Target output slot name output_multiplicity (PipeOutputMultiplicity | None): Output multiplicity setting diff --git a/pipelex/core/concepts/concept_factory.py b/pipelex/core/concepts/concept_factory.py index 7e2ef1725..9a22ceda8 100644 --- a/pipelex/core/concepts/concept_factory.py +++ b/pipelex/core/concepts/concept_factory.py @@ -21,7 +21,7 @@ class ConceptDeclarationType(StrEnum): - """Enum representing the 5 ways a concept can be declared in PLX files. + """Enum representing the 5 ways a concept can be declared in MTHDS files. 
Option 1: STRING - Concept is defined as a string Example: diff --git a/pipelex/core/concepts/structure_generation/generator.py b/pipelex/core/concepts/structure_generation/generator.py index 0a57301c3..061435ec4 100644 --- a/pipelex/core/concepts/structure_generation/generator.py +++ b/pipelex/core/concepts/structure_generation/generator.py @@ -89,7 +89,7 @@ def generate_from_structure_blueprint( "\n" "If you want to customize this structure:\n" " 1. Copy this file to your own module\n" - " 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file\n" + " 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file\n" " and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)\n" " 3. Make sure your custom class is importable and registered\n" "\n" diff --git a/pipelex/core/interpreter/exceptions.py b/pipelex/core/interpreter/exceptions.py index 6b5c4125b..70e9fce4c 100644 --- a/pipelex/core/interpreter/exceptions.py +++ b/pipelex/core/interpreter/exceptions.py @@ -15,5 +15,5 @@ def __init__( super().__init__(message) -class PLXDecodeError(TomlError): - """Raised when PLX decoding fails.""" +class MthdsDecodeError(TomlError): + """Raised when MTHDS decoding fails.""" diff --git a/pipelex/core/interpreter/interpreter.py b/pipelex/core/interpreter/interpreter.py index 6ae158f23..d6ece605d 100644 --- a/pipelex/core/interpreter/interpreter.py +++ b/pipelex/core/interpreter/interpreter.py @@ -4,7 +4,7 @@ from pydantic import BaseModel, ValidationError from pipelex.core.bundles.pipelex_bundle_blueprint import PipelexBundleBlueprint -from pipelex.core.interpreter.exceptions import PipelexInterpreterError, PLXDecodeError +from pipelex.core.interpreter.exceptions import MthdsDecodeError, PipelexInterpreterError from pipelex.core.interpreter.validation_error_categorizer import PIPELEX_BUNDLE_BLUEPRINT_SOURCE_FIELD, categorize_blueprint_validation_error from pipelex.tools.misc.toml_utils import TomlError, load_toml_from_content, load_toml_from_path from pipelex.tools.typing.pydantic_utils import format_pydantic_validation_error @@ -14,25 +14,25 @@ class PipelexInterpreter(BaseModel): - """plx -> PipelexBundleBlueprint""" + """MTHDS -> PipelexBundleBlueprint""" @classmethod - def make_pipelex_bundle_blueprint(cls, bundle_path: Path | None = None, plx_content: str | None = None) -> PipelexBundleBlueprint: + def make_pipelex_bundle_blueprint(cls, bundle_path: Path | None = None, mthds_content: str | None = None) -> PipelexBundleBlueprint: blueprint_dict: dict[str, Any] try: if bundle_path is not None: blueprint_dict = load_toml_from_path(path=str(bundle_path)) blueprint_dict[PIPELEX_BUNDLE_BLUEPRINT_SOURCE_FIELD] = str(bundle_path) - elif plx_content is not None: - blueprint_dict = load_toml_from_content(content=plx_content) + elif mthds_content is not None: + blueprint_dict = load_toml_from_content(content=mthds_content) else: - msg = "Either 'bundle_path' or 'plx_content' must be provided for the PipelexInterpreter to make a PipelexBundleBlueprint" + msg = "Either 'bundle_path' or 'mthds_content' must be provided for the PipelexInterpreter to make a PipelexBundleBlueprint" raise PipelexInterpreterError(msg) except TomlError as exc: - raise PLXDecodeError(message=exc.message, doc=exc.doc, pos=exc.pos, lineno=exc.lineno, colno=exc.colno) from exc + raise MthdsDecodeError(message=exc.message, doc=exc.doc, pos=exc.pos, lineno=exc.lineno, colno=exc.colno) from exc 
if not blueprint_dict: - msg = "Could not make 'PipelexBundleBlueprint': no blueprint found in the PLX file" + msg = "Could not make 'PipelexBundleBlueprint': no blueprint found in the MTHDS file" raise PipelexInterpreterError(msg) try: diff --git a/pipelex/graph/reactflow/templates/_styles.css.jinja2 b/pipelex/graph/reactflow/templates/_styles.css.jinja2 index f75fddf7e..04d0edfb1 100644 --- a/pipelex/graph/reactflow/templates/_styles.css.jinja2 +++ b/pipelex/graph/reactflow/templates/_styles.css.jinja2 @@ -111,7 +111,7 @@ /* Dracula palette - vibrant dark theme with high contrast */ [data-palette="dracula"] { - /* Pipes / Execution Units - Salmon red (matches plx syntax highlighting) */ + /* Pipes / Execution Units - Salmon red (matches MTHDS syntax highlighting) */ --color-pipe: #ff6b6b; --color-pipe-bg: rgba(224, 108, 117, 0.18); --color-pipe-text: #ffffff; diff --git a/pipelex/language/mthds_factory.py b/pipelex/language/mthds_factory.py index 6d84862aa..236a12e68 100644 --- a/pipelex/language/mthds_factory.py +++ b/pipelex/language/mthds_factory.py @@ -243,7 +243,7 @@ def make_template_table(cls, template_value: Mapping[str, Any]) -> Any: def make_construct_table(cls, construct_value: Mapping[str, Any]) -> Any: """Create a nested table for construct section in MTHDS format. - The construct_value should already be in MTHDS format (from ConstructBlueprint.to_plx_dict()) + The construct_value should already be in MTHDS format (from ConstructBlueprint.to_mthds_dict()) with field names at the root, not wrapped in a 'fields' key. """ tbl = table() diff --git a/pipelex/libraries/library_manager_abstract.py b/pipelex/libraries/library_manager_abstract.py index 10fa677db..22893bd3b 100644 --- a/pipelex/libraries/library_manager_abstract.py +++ b/pipelex/libraries/library_manager_abstract.py @@ -60,7 +60,7 @@ def load_concepts_only_from_blueprints(self, library_id: str, blueprints: list[P Args: library_id: The ID of the library to load into - blueprints: List of parsed PLX blueprints to load + blueprints: List of parsed MTHDS blueprints to load Returns: List of all concepts that were loaded @@ -99,7 +99,7 @@ def load_libraries_concepts_only( Args: library_id: The ID of the library to load into library_dirs: List of directories containing MTHDS files - library_file_paths: List of specific PLX file paths to load + library_file_paths: List of specific MTHDS file paths to load Returns: List of all concepts that were loaded diff --git a/pipelex/libraries/pipe/pipe_library.py b/pipelex/libraries/pipe/pipe_library.py index 25048f83a..805651306 100644 --- a/pipelex/libraries/pipe/pipe_library.py +++ b/pipelex/libraries/pipe/pipe_library.py @@ -59,7 +59,7 @@ def get_optional_pipe(self, pipe_code: str) -> PipeAbstract | None: def get_required_pipe(self, pipe_code: str) -> PipeAbstract: the_pipe = self.get_optional_pipe(pipe_code=pipe_code) if not the_pipe: - msg = f"Pipe '{pipe_code}' not found. Check for typos and make sure it is declared in plx file in an imported package." + msg = f"Pipe '{pipe_code}' not found. Check for typos and make sure it is declared in an MTHDS file in an imported package."
raise PipeNotFoundError(msg) return the_pipe diff --git a/pipelex/pipe_operators/compose/construct_blueprint.py b/pipelex/pipe_operators/compose/construct_blueprint.py index b954b1162..373f2e275 100644 --- a/pipelex/pipe_operators/compose/construct_blueprint.py +++ b/pipelex/pipe_operators/compose/construct_blueprint.py @@ -80,14 +80,14 @@ def validate_method_data_consistency(self) -> Self: raise ValueError(msg) return self - def to_plx_dict(self) -> Any: - """Convert to PLX-format dict for serialization. + def to_mthds_dict(self) -> Any: + """Convert to MTHDS-format dict for serialization. - Returns the format expected in PLX files: + Returns the format expected in MTHDS files: - FIXED: Just the value itself - FROM_VAR: { from: "path" } with optional list_to_dict_keyed_by - TEMPLATE: { template: "..." } - - NESTED: The nested construct's PLX dict + - NESTED: The nested construct's MTHDS dict """ match self.method: case ConstructFieldMethod.FIXED: @@ -101,7 +101,7 @@ def to_plx_dict(self) -> Any: return {"template": self.template} case ConstructFieldMethod.NESTED: if self.nested: - return self.nested.to_plx_dict() + return self.nested.to_mthds_dict() return {} @classmethod @@ -197,7 +197,7 @@ def make_from_raw(cls, raw: Any) -> ConstructFieldBlueprint: class ConstructBlueprint(BaseModel): """Blueprint for composing a StructuredContent from working memory. - Parsed from `[pipe.name.construct]` section in PLX files. + Parsed from `[pipe.name.construct]` section in MTHDS files. Attributes: fields: Dictionary mapping field names to their composition blueprints @@ -270,13 +270,13 @@ def get_required_variables(self) -> set[str]: return required - def to_plx_dict(self) -> dict[str, Any]: - """Convert to PLX-format dict (fields at root, no wrapper). + def to_mthds_dict(self) -> dict[str, Any]: + """Convert to MTHDS-format dict (fields at root, no wrapper). - Returns the format expected in PLX files where field names are at + Returns the format expected in MTHDS files where field names are at the root level, not wrapped in a 'fields' key. """ - return {field_name: field_bp.to_plx_dict() for field_name, field_bp in self.fields.items()} + return {field_name: field_bp.to_mthds_dict() for field_name, field_bp in self.fields.items()} @model_serializer(mode="wrap") def serialize_with_context(self, handler: SerializerFunctionWrapHandler, info: SerializationInfo) -> dict[str, Any]: @@ -286,7 +286,7 @@ def serialize_with_context(self, handler: SerializerFunctionWrapHandler, info: S Otherwise, uses default Pydantic serialization. """ if info.context and info.context.get("format") == "mthds": - return self.to_plx_dict() + return self.to_mthds_dict() result = handler(self) return dict(result) # Ensure dict return type diff --git a/pipelex/pipe_operators/compose/pipe_compose_blueprint.py b/pipelex/pipe_operators/compose/pipe_compose_blueprint.py index 6050137b7..fb2b41e21 100644 --- a/pipelex/pipe_operators/compose/pipe_compose_blueprint.py +++ b/pipelex/pipe_operators/compose/pipe_compose_blueprint.py @@ -24,7 +24,7 @@ class PipeComposeBlueprint(PipeBlueprint): # Either template or construct must be provided, but not both # Note: The field is named 'construct_blueprint' internally to avoid conflict with Pydantic's - # BaseModel.construct() method. In PLX/TOML files, use 'construct' (via aliases). + # BaseModel.construct() method. In MTHDS/TOML files, use 'construct' (via aliases). 
template: str | TemplateBlueprint | None = None construct_blueprint: ConstructBlueprint | None = Field(default=None, validation_alias="construct", serialization_alias="construct") diff --git a/pipelex/pipe_operators/extract/pipe_extract.py b/pipelex/pipe_operators/extract/pipe_extract.py index 0e97f2d84..e4217c2e4 100644 --- a/pipelex/pipe_operators/extract/pipe_extract.py +++ b/pipelex/pipe_operators/extract/pipe_extract.py @@ -137,7 +137,7 @@ async def _live_run_operator_pipe( extract_choice: ExtractModelChoice = self.extract_choice or get_model_deck().extract_choice_default extract_setting: ExtractSetting = get_model_deck().get_extract_setting(extract_choice=extract_choice) - # PLX-level max_page_images takes precedence if set, otherwise use ExtractSetting + # MTHDS-level max_page_images takes precedence if set, otherwise use ExtractSetting max_nb_images = self.max_page_images if self.max_page_images is not None else extract_setting.max_nb_images extract_job_params = ExtractJobParams( diff --git a/pipelex/pipeline/execute.py b/pipelex/pipeline/execute.py index 04895567c..a357950c5 100644 --- a/pipelex/pipeline/execute.py +++ b/pipelex/pipeline/execute.py @@ -33,7 +33,7 @@ async def execute_pipeline( library_id: str | None = None, library_dirs: list[str] | None = None, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, bundle_uri: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, @@ -57,19 +57,19 @@ async def execute_pipeline( library_dirs: List of directory paths to load pipe definitions from. Combined with directories from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources + first). When provided alongside ``mthds_content``, definitions from both sources are loaded into the library. pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any - ``main_pipe`` defined in the plx_content). - plx_content: - Complete PLX file content as a string. The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. + Code identifying the pipe to execute. Required when ``mthds_content`` is not + provided. When both ``mthds_content`` and ``pipe_code`` are provided, the + specified pipe from the MTHDS content will be executed (overriding any + ``main_pipe`` defined in the mthds_content). + mthds_content: + Complete MTHDS file content as a string. The pipe to execute is determined by + ``pipe_code`` (if provided) or the ``main_pipe`` property in the MTHDS content. Can be combined with ``library_dirs`` to load additional definitions. bundle_uri: - URI identifying the bundle. If ``plx_content`` is not provided and ``bundle_uri`` + URI identifying the bundle. If ``mthds_content`` is not provided and ``bundle_uri`` points to a local file path, the content will be read from that file. Also used to detect if the bundle was already loaded from library directories (e.g., via PIPELEXPATH) to avoid duplicate domain registration. 
@@ -107,11 +107,11 @@ async def execute_pipeline( # Use provided config or get default execution_config = execution_config or get_config().pipelex.pipeline_execution_config - # If plx_content is not provided but bundle_uri points to a file, read it - if plx_content is None and bundle_uri is not None: + # If MTHDS content is not provided but bundle_uri points to a file, read it + if mthds_content is None and bundle_uri is not None: bundle_path = Path(bundle_uri) if bundle_path.is_file(): - plx_content = bundle_path.read_text(encoding="utf-8") + mthds_content = bundle_path.read_text(encoding="utf-8") properties: dict[EventProperty, Any] graph_spec_result = None @@ -125,7 +125,7 @@ async def execute_pipeline( library_id=library_id, library_dirs=library_dirs, pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=bundle_uri, inputs=inputs, output_name=output_name, diff --git a/pipelex/pipeline/pipeline_run_setup.py b/pipelex/pipeline/pipeline_run_setup.py index 4ab943373..db0a356bc 100644 --- a/pipelex/pipeline/pipeline_run_setup.py +++ b/pipelex/pipeline/pipeline_run_setup.py @@ -47,7 +47,7 @@ async def pipeline_run_setup( library_id: str | None = None, library_dirs: list[str] | None = None, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, bundle_uri: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, @@ -75,14 +75,14 @@ async def pipeline_run_setup( library_dirs: List of directory paths to load pipe definitions from. Combined with directories from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources + first). When provided alongside ``mthds_content``, definitions from both sources are loaded into the library. pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the + Code identifying the pipe to execute. Required when ``mthds_content`` is not + provided. When both ``mthds_content`` and ``pipe_code`` are provided, the specified pipe from the MTHDS content will be executed (overriding any ``main_pipe`` defined in the content). - plx_content: + mthds_content: Complete MTHDS file content as a string. The pipe to execute is determined by ``pipe_code`` (if provided) or the ``main_pipe`` property in the MTHDS content. Can be combined with ``library_dirs`` to load additional definitions. @@ -90,7 +90,7 @@ async def pipeline_run_setup( URI identifying the bundle. Used to detect if the bundle was already loaded from library directories (e.g., via PIPELEXPATH) to avoid duplicate domain registration. If provided and the resolved absolute path is already in the - loaded MTHDS paths, the ``plx_content`` loading will be skipped. + loaded MTHDS paths, the ``mthds_content`` loading will be skipped. inputs: Inputs passed to the pipeline. Can be either a ``PipelineInputs`` dictionary or a ``WorkingMemory`` instance. @@ -118,8 +118,8 @@ async def pipeline_run_setup( """ user_id = user_id or OTelConstants.DEFAULT_USER_ID - if not plx_content and not pipe_code: - msg = "Either pipe_code or plx_content must be provided to the pipeline API." + if not mthds_content and not pipe_code: + msg = "Either pipe_code or mthds_content must be provided to the pipeline API." 
raise ValueError(msg) pipeline = get_pipeline_manager().add_new_pipeline(pipe_code=pipe_code) @@ -148,9 +148,9 @@ async def pipeline_run_setup( else: log.verbose(f"No library directories to load ({source_label})") - # Then handle plx_content or pipe_code - if plx_content: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + # Then handle MTHDS content or pipe_code + if mthds_content: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) blueprints_to_load = [blueprint] # Check if this bundle was already loaded from library directories @@ -170,7 +170,7 @@ async def pipeline_run_setup( if not bundle_already_loaded: library_manager.load_from_blueprints(library_id=library_id, blueprints=blueprints_to_load) - # For now, we only support one blueprint when given a plx_content. So blueprints is of length 1. + # For now, we only support one blueprint when given MTHDS content. So blueprints is of length 1. # blueprint is already set from make_pipelex_bundle_blueprint above if pipe_code: pipe = get_required_pipe(pipe_code=pipe_code) @@ -182,7 +182,7 @@ async def pipeline_run_setup( elif pipe_code: pipe = get_required_pipe(pipe_code=pipe_code) else: - msg = "Either provide pipe_code or plx_content to the pipeline API. 'pipe_code' must be provided when 'plx_content' is None" + msg = "Either provide pipe_code or mthds_content to the pipeline API. 'pipe_code' must be provided when 'mthds_content' is None" raise PipeExecutionError(message=msg) pipe_code = pipe.code diff --git a/pipelex/pipeline/start.py b/pipelex/pipeline/start.py index f21de865c..cca041f7c 100644 --- a/pipelex/pipeline/start.py +++ b/pipelex/pipeline/start.py @@ -16,7 +16,7 @@ async def start_pipeline( library_id: str | None = None, library_dirs: list[str] | None = None, pipe_code: str | None = None, - plx_content: str | None = None, + mthds_content: str | None = None, bundle_uri: str | None = None, inputs: PipelineInputs | WorkingMemory | None = None, output_name: str | None = None, @@ -43,19 +43,19 @@ async def start_pipeline( library_dirs: List of directory paths to load pipe definitions from. Combined with directories from the ``PIPELEXPATH`` environment variable (PIPELEXPATH directories are searched - first). When provided alongside ``plx_content``, definitions from both sources + first). When provided alongside ``mthds_content``, definitions from both sources are loaded into the library. pipe_code: - Code identifying the pipe to execute. Required when ``plx_content`` is not - provided. When both ``plx_content`` and ``pipe_code`` are provided, the - specified pipe from the PLX content will be executed (overriding any + Code identifying the pipe to execute. Required when ``mthds_content`` is not + provided. When both ``mthds_content`` and ``pipe_code`` are provided, the + specified pipe from the MTHDS content will be executed (overriding any ``main_pipe`` defined in the content). - plx_content: - Complete PLX file content as a string. The pipe to execute is determined by - ``pipe_code`` (if provided) or the ``main_pipe`` property in the PLX content. + mthds_content: + Complete MTHDS file content as a string. The pipe to execute is determined by + ``pipe_code`` (if provided) or the ``main_pipe`` property in the MTHDS content. Can be combined with ``library_dirs`` to load additional definitions. bundle_uri: - URI identifying the bundle. If ``plx_content`` is not provided and ``bundle_uri`` + URI identifying the bundle. 
If ``mthds_content`` is not provided and ``bundle_uri`` points to a local file path, the content will be read from that file. Also used to detect if the bundle was already loaded from library directories (e.g., via PIPELEXPATH) to avoid duplicate domain registration. @@ -96,11 +96,11 @@ async def start_pipeline( # Use provided config or get default execution_config = execution_config or get_config().pipelex.pipeline_execution_config - # If plx_content is not provided but bundle_uri points to a file, read it - if plx_content is None and bundle_uri is not None: + # If MTHDS content is not provided but bundle_uri points to a file, read it + if mthds_content is None and bundle_uri is not None: bundle_path = Path(bundle_uri) if bundle_path.is_file(): - plx_content = bundle_path.read_text(encoding="utf-8") + mthds_content = bundle_path.read_text(encoding="utf-8") # TODO: make sure we close the graph tracer after the task completes pipe_job, pipeline_run_id, _library_id = await pipeline_run_setup( @@ -108,7 +108,7 @@ async def start_pipeline( library_id=library_id, library_dirs=library_dirs, pipe_code=pipe_code, - plx_content=plx_content, + mthds_content=mthds_content, bundle_uri=bundle_uri, inputs=inputs, output_name=output_name, diff --git a/pipelex/pipeline/validate_bundle.py b/pipelex/pipeline/validate_bundle.py index fe6172854..902b3489b 100644 --- a/pipelex/pipeline/validate_bundle.py +++ b/pipelex/pipeline/validate_bundle.py @@ -84,17 +84,17 @@ class ValidateBundleResult(BaseModel): async def validate_bundle( - plx_file_path: Path | None = None, - plx_content: str | None = None, + mthds_file_path: Path | None = None, + mthds_content: str | None = None, blueprints: list[PipelexBundleBlueprint] | None = None, library_dirs: Sequence[Path] | None = None, ) -> ValidateBundleResult: - provided_params = sum([blueprints is not None, plx_content is not None, plx_file_path is not None]) + provided_params = sum([blueprints is not None, mthds_content is not None, mthds_file_path is not None]) if provided_params == 0: - msg = "At least one of blueprints, plx_content, or plx_file_path must be provided to validate_bundle" + msg = "At least one of blueprints, mthds_content, or mthds_file_path must be provided to validate_bundle" raise ValidateBundleError(message=msg) if provided_params > 1: - msg = "Only one of blueprints, plx_content, or plx_file_path can be provided to validate_bundle, not multiple" + msg = "Only one of blueprints, mthds_content, or mthds_file_path can be provided to validate_bundle, not multiple" raise ValidateBundleError(message=msg) library_manager = get_library_manager() @@ -121,19 +121,19 @@ async def validate_bundle( dry_run_results = await dry_run_pipes(pipes=loaded_pipes, raise_on_failure=True) return ValidateBundleResult(blueprints=loaded_blueprints, pipes=loaded_pipes, dry_run_result=dry_run_results) - elif plx_content is not None: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + elif mthds_content is not None: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) loaded_blueprints = [blueprint] loaded_pipes = library_manager.load_from_blueprints(library_id=library_id, blueprints=[blueprint]) dry_run_results = await dry_run_pipes(pipes=loaded_pipes, raise_on_failure=True) return ValidateBundleResult(blueprints=loaded_blueprints, pipes=loaded_pipes, dry_run_result=dry_run_results) else: - assert plx_file_path is not None - blueprint = 
PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) + assert mthds_file_path is not None + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_mthds_paths: + if mthds_file_path.resolve() not in library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_pipes = library_manager.load_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: @@ -241,8 +241,8 @@ class LoadConceptsOnlyResult(BaseModel): def load_concepts_only( - plx_file_path: Path | None = None, - plx_content: str | None = None, + mthds_file_path: Path | None = None, + mthds_content: str | None = None, blueprints: list[PipelexBundleBlueprint] | None = None, library_dirs: Sequence[Path] | None = None, ) -> LoadConceptsOnlyResult: @@ -253,8 +253,8 @@ def load_concepts_only( and does not run dry runs. Args: - plx_file_path: Path to a single MTHDS file to load (mutually exclusive with others) - plx_content: MTHDS content string to load (mutually exclusive with others) + mthds_file_path: Path to a single MTHDS file to load (mutually exclusive with others) + mthds_content: MTHDS content string to load (mutually exclusive with others) blueprints: Pre-parsed blueprints to load (mutually exclusive with others) library_dirs: Optional directories containing additional MTHDS library files @@ -264,12 +264,12 @@ def load_concepts_only( Raises: ValidateBundleError: If loading fails due to interpreter or validation errors """ - provided_params = sum([blueprints is not None, plx_content is not None, plx_file_path is not None]) + provided_params = sum([blueprints is not None, mthds_content is not None, mthds_file_path is not None]) if provided_params == 0: - msg = "At least one of blueprints, plx_content, or plx_file_path must be provided to load_concepts_only" + msg = "At least one of blueprints, mthds_content, or mthds_file_path must be provided to load_concepts_only" raise ValidateBundleError(message=msg) if provided_params > 1: - msg = "Only one of blueprints, plx_content, or plx_file_path can be provided to load_concepts_only, not multiple" + msg = "Only one of blueprints, mthds_content, or mthds_file_path can be provided to load_concepts_only, not multiple" raise ValidateBundleError(message=msg) library_manager = get_library_manager() @@ -296,18 +296,18 @@ def load_concepts_only( loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=blueprints) return LoadConceptsOnlyResult(blueprints=loaded_blueprints, concepts=loaded_concepts) - elif plx_content is not None: - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=plx_content) + elif mthds_content is not None: + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content) loaded_blueprints = [blueprint] loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) return LoadConceptsOnlyResult(blueprints=loaded_blueprints, concepts=loaded_concepts) else: - assert plx_file_path is not None - blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=plx_file_path) + assert mthds_file_path is not None + blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(bundle_path=mthds_file_path) loaded_blueprints = [blueprint] - if plx_file_path.resolve() not in library.loaded_mthds_paths: + if mthds_file_path.resolve() not in 
library.loaded_mthds_paths: # File not yet loaded - load it from the blueprint loaded_concepts = library_manager.load_concepts_only_from_blueprints(library_id=library_id, blueprints=[blueprint]) else: diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py index f113938e3..b722d64c9 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__customer.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py index fda04acc4..943274969 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__invoice.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py index a4c1e11b9..1b1333162 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/generated_models/nested_concepts_test__line_item.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py b/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py index 55be1374c..56fc9c942 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/test_nested_concepts_pipe.py @@ -1,7 +1,7 @@ """E2E test for pipes with nested concept-to-concept references. This test verifies that: -1. Concepts with nested concept references can be loaded from PLX files +1. 
Concepts with nested concept references can be loaded from MTHDS files 2. The dependency graph correctly orders concept loading 3. Pipes can generate structured output with nested concepts 4. The generated output contains properly typed nested objects @@ -31,7 +31,7 @@ async def test_invoice_with_nested_customer_and_line_items(self, pipe_run_mode: """Test that a pipe can generate an Invoice with nested Customer and LineItem concepts. This test verifies the complete flow: - 1. PLX file with concept-to-concept references is loaded + 1. MTHDS file with concept-to-concept references is loaded 2. Concepts are loaded in topological order (LineItem, Customer before Invoice) 3. The LLM generates structured output with proper nested types 4. The output can be accessed via working_memory.get_stuff_as() with typed models diff --git a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py index 819da6a5d..80de61124 100644 --- a/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py +++ b/tests/e2e/pipelex/concepts/nested_concepts/test_structure_generator_cli.py @@ -48,7 +48,7 @@ async def test_generate_and_import_nested_concept_structures(self): output_directory = Path(temp_dir) # Validate the MTHDS file to get blueprints - validate_result = await validate_bundle(plx_file_path=mthds_file_path) + validate_result = await validate_bundle(mthds_file_path=mthds_file_path) blueprints = validate_result.blueprints # Generate structure files diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py index 4b7dae325..588a1e206 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_analysis.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py index 6e81215b0..131392fdc 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_question.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py index cf0de5173..8b959acaf 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_itvw_sheet.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py index 7fb93ab2d..1e611255a 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_job_requirements.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py index d5aec53fb..735e1280e 100644 --- a/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py +++ b/tests/e2e/pipelex/pipes/pipe_operators/pipe_compose/cv_job_matching_match_analysis.py @@ -2,7 +2,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered diff --git a/tests/integration/pipelex/builder/test_builder_mthds_validation.py b/tests/integration/pipelex/builder/test_builder_mthds_validation.py index b748a8ab9..acf085cba 100644 --- a/tests/integration/pipelex/builder/test_builder_mthds_validation.py +++ b/tests/integration/pipelex/builder/test_builder_mthds_validation.py @@ -32,7 +32,7 @@ class TestBuilderMthdsValidation: async def test_builder_mthds_loads_and_validates(self): """Test that builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.BUILDER_MTHDS_PATH, + mthds_file_path=TestData.BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -45,7 +45,7 @@ async def test_builder_mthds_loads_and_validates(self): async def test_agentic_builder_mthds_loads_and_validates(self): """Test that agentic_builder.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.AGENTIC_BUILDER_MTHDS_PATH, + mthds_file_path=TestData.AGENTIC_BUILDER_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) @@ -58,7 +58,7 @@ async def test_agentic_builder_mthds_loads_and_validates(self): async def test_pipe_design_mthds_loads_and_validates(self): """Test that pipe_design.mthds can be loaded and validated successfully.""" result = await validate_bundle( - plx_file_path=TestData.PIPE_DESIGN_MTHDS_PATH, + mthds_file_path=TestData.PIPE_DESIGN_MTHDS_PATH, library_dirs=[BUILDER_DIR, BUILDER_DIR / "pipe"], ) diff --git a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py index 120669374..fdae68714 100644 --- a/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py +++ b/tests/integration/pipelex/concepts/out_of_order_refines/test_out_of_order_refines.py @@ -27,7 +27,7 @@ async def test_simple_out_of_order_refines_single_file(self): # validate_bundle internally loads libraries which triggers ConceptFactory.make_from_blueprint # This should fail because VIPCustomer is defined before Customer # with pytest.raises(ConceptFactoryError) as exc_info: - await validate_bundle(plx_file_path=mthds_file_path) + await validate_bundle(mthds_file_path=mthds_file_path) async def test_multi_level_out_of_order_refines_across_files(self): """Test multi-level refinement chain fails when concepts are out of order across files. 
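# Illustrative usage (not part of the patch): a sketch of the renamed validate_bundle
# and load_concepts_only keywords exercised by the tests above. The bundle path is a
# placeholder; exactly one of mthds_file_path, mthds_content, or blueprints may be
# passed to either function, per the checks in pipelex/pipeline/validate_bundle.py.
import asyncio
from pathlib import Path

from pipelex.pipeline.validate_bundle import load_concepts_only, validate_bundle


async def check_bundle() -> None:
    # Full validation: parse the bundle, load it, then dry-run the loaded pipes.
    result = await validate_bundle(mthds_file_path=Path("bundles/example.mthds"))
    print(f"{len(result.pipes)} pipes validated")

    # Lighter pass: load concepts only, skipping pipes and dry runs.
    concepts_only = load_concepts_only(mthds_file_path=Path("bundles/example.mthds"))
    print(f"{len(concepts_only.concepts)} concepts loaded")


asyncio.run(check_bundle())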
diff --git a/tests/integration/pipelex/pipeline/test_load_concepts_only.py b/tests/integration/pipelex/pipeline/test_load_concepts_only.py index a651bfaba..e24712437 100644 --- a/tests/integration/pipelex/pipeline/test_load_concepts_only.py +++ b/tests/integration/pipelex/pipeline/test_load_concepts_only.py @@ -36,7 +36,7 @@ def test_load_concepts_only_single_file(self, load_empty_library: Callable[[], s mthds_path = Path(tmp_dir) / "test.mthds" mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=mthds_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert isinstance(result, LoadConceptsOnlyResult) assert len(result.blueprints) == 1 @@ -68,7 +68,7 @@ def test_load_concepts_only_skips_pipes(self, load_empty_library: Callable[[], s mthds_path = Path(tmp_dir) / "test.mthds" mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=mthds_path) + result = load_concepts_only(mthds_file_path=mthds_path) # Concepts should be loaded assert len(result.concepts) == 1 @@ -144,7 +144,7 @@ def test_load_concepts_only_with_concept_references(self, load_empty_library: Ca mthds_path = Path(tmp_dir) / "test.mthds" mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=mthds_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert len(result.concepts) == 2 @@ -183,7 +183,7 @@ def test_load_concepts_only_detects_cycles(self, load_empty_library: Callable[[] mthds_path.write_text(mthds_content, encoding="utf-8") with pytest.raises(Exception, match=r"[Cc]ycle"): - load_concepts_only(plx_file_path=mthds_path) + load_concepts_only(mthds_file_path=mthds_path) def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable[[], str]): """Test loading concepts with library dependencies.""" @@ -220,7 +220,7 @@ def test_load_concepts_only_with_library_dirs(self, load_empty_library: Callable main_mthds_path.write_text(main_mthds, encoding="utf-8") result = load_concepts_only( - plx_file_path=main_mthds_path, + mthds_file_path=main_mthds_path, library_dirs=[Path(lib_dir)], ) @@ -252,7 +252,7 @@ def test_load_concepts_only_with_mthds_content(self, load_empty_library: Callabl name = { type = "text", description = "Item name" } """ - result = load_concepts_only(plx_content=mthds_content) + result = load_concepts_only(mthds_content=mthds_content) assert len(result.blueprints) == 1 assert len(result.concepts) == 1 @@ -280,7 +280,7 @@ def test_load_concepts_only_with_refines(self, load_empty_library: Callable[[], mthds_path = Path(tmp_dir) / "test.mthds" mthds_path.write_text(mthds_content, encoding="utf-8") - result = load_concepts_only(plx_file_path=mthds_path) + result = load_concepts_only(mthds_file_path=mthds_path) assert len(result.concepts) == 2 diff --git a/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py b/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py index ae9a7ea83..ee089f9c7 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_batch/test_pipe_batch_simple.py @@ -50,7 +50,7 @@ async def test_simple_batch_processing( pipe_batch_blueprint = PipeBatchBlueprint( description="Simple batch processing test", - branch_pipe_code="uppercase_transformer", # This exists in the PLX file + branch_pipe_code="uppercase_transformer", # This exists in the MTHDS file inputs={ "text_list": 
concept_1.concept_ref, }, diff --git a/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py b/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py index 601a42655..5b50cf027 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_condition/test_pipe_condition_simple.py @@ -108,7 +108,7 @@ async def test_condition_short_text_processing( ): """Test PipeCondition with short text that should trigger add_prefix_short_text pipe.""" load_test_library([Path("tests/integration/pipelex/pipes/controller/pipe_condition")]) - # Create PipeCondition instance - pipes are loaded from PLX files + # Create PipeCondition instance - pipes are loaded from MTHDS files pipe_condition_blueprint = PipeConditionBlueprint( description="Text length condition for short text testing", inputs={"input_text": f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}"}, diff --git a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py index 5d572009c..2b9a9bbdf 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py +++ b/tests/integration/pipelex/pipes/controller/pipe_parallel/test_pipe_parallel_simple.py @@ -27,7 +27,7 @@ async def test_parallel_text_analysis( ): """Test PipeParallel running three text analysis pipes in parallel.""" load_test_library([Path("tests/integration/pipelex/pipes/controller/pipe_parallel")]) - # Create PipeParallel instance - pipes are loaded from PLX files + # Create PipeParallel instance - pipes are loaded from MTHDS files pipe_parallel_blueprint = PipeParallelBlueprint( description="Parallel text analysis pipeline", inputs={"input_text": f"{SpecialDomain.NATIVE}.{NativeConceptCode.TEXT}"}, diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py index baa62a205..ffb44e92f 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py +++ b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_list_output_bug.py @@ -91,7 +91,7 @@ async def test_pipe_llm_list_output_produces_list_content_in_sequence(self): # Load the bundle result = await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -122,7 +122,7 @@ async def test_standalone_pipe_llm_with_list_output(self): # Load the bundle await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -257,7 +257,7 @@ async def test_nested_sequence_with_list_output_and_batch_over(self): # Load the bundle result = await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -284,7 +284,7 @@ async def test_inner_sequence_directly(self): # Load the bundle await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py index fce24895b..0f59065bb 100644 --- a/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py +++ 
b/tests/integration/pipelex/pipes/controller/pipe_sequence/test_pipe_sequence_simple.py @@ -41,7 +41,7 @@ async def test_simple_sequence_processing( concept_library.add_concepts([concept_1]) concept_2 = get_native_concept(native_concept=NativeConceptCode.TEXT) - # Create PipeSequence instance - pipes are loaded from PLX files + # Create PipeSequence instance - pipes are loaded from MTHDS files pipe_sequence_blueprint = PipeSequenceBlueprint( description="Simple sequence for text processing", inputs={"input_text": concept_1.concept_ref}, diff --git a/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py b/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py index 7954d17d1..b633b1b48 100644 --- a/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py +++ b/tests/integration/pipelex/pipes/llm_prompt_inputs/test_image_inputs_inference.py @@ -147,7 +147,7 @@ async def test_analyze_image_collection( assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: - # Verify that the output is the Analysis concept from the PLX file + # Verify that the output is the Analysis concept from the MTHDS file assert pipe_output.main_stuff.concept.code == "Analysis" async def test_compare_two_image_collections( @@ -198,7 +198,7 @@ async def test_compare_two_image_collections( assert pipe_output.main_stuff is not None if pipe_run_mode.is_live: - # Verify that the output is the Analysis concept from the PLX file + # Verify that the output is the Analysis concept from the MTHDS file assert pipe_output.main_stuff.concept.code == "Analysis" @pytest.mark.parametrize(("_topic", "data_url"), ImageTestCases.DATA_URL_VISION_TEST_CASES) diff --git a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py index 43ee0c3cf..2e84c0bce 100644 --- a/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py +++ b/tests/integration/pipelex/pipes/operator/pipe_compose_structured/test_pipe_compose_structured.py @@ -1,7 +1,7 @@ """Integration tests for PipeCompose with construct (StructuredContent output). These tests verify that PipeCompose can produce StructuredContent objects -using the construct blueprint syntax in PLX files. +using the construct blueprint syntax in MTHDS files. 
""" from pathlib import Path diff --git a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py index bc30af6d9..5c73034f7 100644 --- a/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py +++ b/tests/integration/pipelex/pipes/operator/pipe_func/test_pipe_func_validation_errors.py @@ -207,7 +207,7 @@ async def test_pipe_func_missing_return_type_reports_clear_error(self): # Currently raises LibraryError, but ValidateBundleError is also acceptable with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -252,7 +252,7 @@ async def test_pipe_func_with_return_type_validates_successfully(self): # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -284,7 +284,7 @@ async def test_pipe_func_decorated_but_ineligible_not_silently_ignored(self): # Try to validate - should fail with informative error with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -339,7 +339,7 @@ async def test_ineligible_function_returns_correct_error( # Validate the bundle - should fail with a specific error message with pytest.raises((ValidateBundleError, LibraryError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -405,7 +405,7 @@ async def func_wrong_structure_class(working_memory: WorkingMemory) -> MyStructu # Validate the bundle - should fail because return type doesn't match concept's structure class with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -462,7 +462,7 @@ async def func_returns_list_content(working_memory: WorkingMemory) -> ListConten # Validate the bundle - should succeed result = await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -515,7 +515,7 @@ async def func_returns_wrong_list_content(working_memory: WorkingMemory) -> List # Validate the bundle - should fail with clear error about item type mismatch with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) @@ -573,7 +573,7 @@ async def func_returns_single_instead_of_list(working_memory: WorkingMemory) -> # Validate the bundle - should fail because return type is not ListContent with pytest.raises((ValidateBundleError, LibraryError, TypeError)) as exc_info: await validate_bundle( - plx_file_path=mthds_file, + mthds_file_path=mthds_file, library_dirs=[temp_path], ) diff --git a/tests/unit/pipelex/cli/test_agent_graph_cmd.py b/tests/unit/pipelex/cli/test_agent_graph_cmd.py index e864f669f..ea53943be 100644 --- a/tests/unit/pipelex/cli/test_agent_graph_cmd.py +++ b/tests/unit/pipelex/cli/test_agent_graph_cmd.py @@ -14,7 +14,7 @@ from pytest_mock import MockerFixture from pipelex.cli.agent_cli.commands.graph_cmd import GraphFormat, graph_cmd -from pipelex.core.interpreter.exceptions import PLXDecodeError +from pipelex.core.interpreter.exceptions 
import MthdsDecodeError GRAPH_CMD_MODULE = "pipelex.cli.agent_cli.commands.graph_cmd" @@ -245,13 +245,13 @@ def test_mthds_parse_error_produces_error( capsys: pytest.CaptureFixture[str], tmp_path: Path, ) -> None: - """MTHDS parse error should produce a PLXDecodeError.""" + """MTHDS parse error should produce a MthdsDecodeError.""" mthds_file = tmp_path / "bundle.mthds" mthds_file.write_text("invalid toml {{{{") mocker.patch( f"{GRAPH_CMD_MODULE}.PipelexInterpreter.make_pipelex_bundle_blueprint", - side_effect=PLXDecodeError(message="bad toml", doc="invalid toml {{{{", pos=0, lineno=1, colno=1), + side_effect=MthdsDecodeError(message="bad toml", doc="invalid toml {{{{", pos=0, lineno=1, colno=1), ) with pytest.raises(typer.Exit) as exc_info: @@ -260,4 +260,4 @@ def test_mthds_parse_error_produces_error( assert exc_info.value.exit_code == 1 parsed = json.loads(capsys.readouterr().err) assert parsed["error"] is True - assert parsed["error_type"] == "PLXDecodeError" + assert parsed["error_type"] == "MthdsDecodeError" diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py index f2ce7b608..180c2082c 100644 --- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py +++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator.py @@ -29,7 +29,7 @@ def test_simple_structure_generation(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -85,7 +85,7 @@ def test_complex_types_generation(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -127,7 +127,7 @@ def test_choices_generation(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -181,7 +181,7 @@ def test_typed_choices_generation(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered @@ -217,7 +217,7 @@ def test_empty_structure(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -253,7 +253,7 @@ def test_concept_get_structure_method(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -290,7 +290,7 @@ def test_generate_from_blueprint_dict_function(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -342,7 +342,7 @@ def test_all_field_types(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -384,7 +384,7 @@ def test_required_vs_optional_fields(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -438,7 +438,7 @@ def test_default_values(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -493,7 +493,7 @@ def test_nested_list_types(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. 
Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -551,7 +551,7 @@ def test_nested_dict_types(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -615,7 +615,7 @@ def test_mixed_complexity_structure(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -665,7 +665,7 @@ def test_mixed_structure_blueprint_normalization(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -711,7 +711,7 @@ def test_code_validation_success(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -788,7 +788,7 @@ def test_inheritance_from_text_content(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -850,7 +850,7 @@ def test_inheritance_from_image_content(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered @@ -904,7 +904,7 @@ def test_inheritance_from_number_content(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -957,7 +957,7 @@ def test_inheritance_from_json_content(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -1004,7 +1004,7 @@ def test_inheritance_with_empty_structure(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -1060,7 +1060,7 @@ def test_inheritance_from_document_content(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py index a8c2c26cb..fbbef723d 100644 --- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py +++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_concept_refs.py @@ -14,7 +14,7 @@ If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. 
Make sure your custom class is importable and registered diff --git a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py index b0d1565a3..b7ca21a7e 100644 --- a/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py +++ b/tests/unit/pipelex/core/concepts/structure_generation/test_structure_generator_escaping.py @@ -39,7 +39,7 @@ def test_escape_double_quotes_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -84,7 +84,7 @@ def test_escape_single_quotes_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -129,7 +129,7 @@ def test_escape_mixed_quotes_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -174,7 +174,7 @@ def test_escape_backslashes_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -219,7 +219,7 @@ def test_escape_newlines_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition) 3. Make sure your custom class is importable and registered @@ -264,7 +264,7 @@ def test_escape_tabs_in_description(self): If you want to customize this structure: 1. Copy this file to your own module - 2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file + 2. 
Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -310,7 +310,7 @@ def test_escape_multiple_special_characters_combined(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -356,7 +356,7 @@ def test_escape_default_value_with_quotes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -404,7 +404,7 @@ def test_escape_default_value_with_backslashes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -449,7 +449,7 @@ def test_empty_string_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -500,7 +500,7 @@ def test_very_long_description_with_quotes(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -545,7 +545,7 @@ def test_unicode_characters_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -590,7 +590,7 @@ def test_carriage_return_in_description(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
@@ -649,7 +649,7 @@ def test_multiple_fields_with_various_escaping_needs(self):
 
     If you want to customize this structure:
     1. Copy this file to your own module
-    2. Remove the 'structure' or 'refines' declaration from the concept in the PLX file
+    2. Remove the 'structure' or 'refines' declaration from the concept in the MTHDS file
        and declare it in inline mode (see https://docs.pipelex.com/home/6-build-reliable-ai-workflows/concepts/define_your_concepts/#basic-concept-definition)
     3. Make sure your custom class is importable and registered
diff --git a/tests/unit/pipelex/core/interpreter/test_interpreter.py b/tests/unit/pipelex/core/interpreter/test_interpreter.py
index f5dde26ec..4297537b1 100644
--- a/tests/unit/pipelex/core/interpreter/test_interpreter.py
+++ b/tests/unit/pipelex/core/interpreter/test_interpreter.py
@@ -10,7 +10,7 @@ class TestPipelexInterpreter:
     @pytest.mark.parametrize(("test_name", "mthds_content", "expected_blueprint"), InterpreterTestCases.VALID_TEST_CASES)
     def test_make_pipelex_bundle_blueprint(self, test_name: str, mthds_content: str, expected_blueprint: PipelexBundleBlueprint):
         """Test making blueprint from various valid MTHDS content."""
-        blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=mthds_content)
+        blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content)
 
         pretty_print(blueprint, title=f"Blueprint {test_name}")
         pretty_print(expected_blueprint, title=f"Expected blueprint {test_name}")
@@ -21,4 +21,4 @@ def test_invalid_mthds_should_raise_exception(self, test_name: str, invalid_mthd
         """Test that invalid MTHDS content raises appropriate exceptions."""
         log.verbose(f"Testing invalid MTHDS content: {test_name}")
         with pytest.raises(expected_exception):
-            PipelexInterpreter.make_pipelex_bundle_blueprint(plx_content=invalid_mthds_content)
+            PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=invalid_mthds_content)
diff --git a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py b/tests/unit/pipelex/core/test_data/errors/invalid_mthds.py
similarity index 94%
rename from tests/unit/pipelex/core/test_data/errors/invalid_plx.py
rename to tests/unit/pipelex/core/test_data/errors/invalid_mthds.py
index 5017ac2ec..ea5f67d10 100644
--- a/tests/unit/pipelex/core/test_data/errors/invalid_plx.py
+++ b/tests/unit/pipelex/core/test_data/errors/invalid_mthds.py
@@ -1,4 +1,4 @@
-from pipelex.core.interpreter.interpreter import PipelexInterpreterError, PLXDecodeError
+from pipelex.core.interpreter.interpreter import MthdsDecodeError, PipelexInterpreterError
 
 INVALID_MTHDS_SYNTAX = (
     "invalid_mthds_syntax",
@@ -7,7 +7,7 @@
 [concept]
 InvalidConcept = "This is missing a closing quote""",
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 MALFORMED_SECTION = (
@@ -18,7 +18,7 @@
 [concept
 TestConcept = "Missing closing bracket"
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 UNCLOSED_STRING = (
@@ -26,7 +26,7 @@
 """domain = "test_domain"
 description = "Domain with unclosed string
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 DUPLICATE_KEYS = (
@@ -38,7 +38,7 @@
 [concept]
 TestConcept = "A test concept"
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 INVALID_ESCAPE_SEQUENCE = (
@@ -49,7 +49,7 @@
 [concept]
 TestConcept = "A test concept"
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 # PipelexBundleBlueprint Structure Errors
@@ -174,7 +174,7 @@
 [concept.]
 InvalidName = "Empty table name"
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 INVALID_ARRAY_SYNTAX = (
@@ -185,7 +185,7 @@
 [concept]
 TestConcept = ["Unclosed array"
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 INVALID_ARRAY_SYNTAX2 = (
     "invalid_array_syntax",
@@ -195,7 +195,7 @@
 [concept]
 [concept]
 """,
-    PLXDecodeError,
+    MthdsDecodeError,
 )
 
 # Export all error test cases
diff --git a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py
index db5ca3dae..f69f5c37c 100644
--- a/tests/unit/pipelex/core/test_data/interpreter_test_cases.py
+++ b/tests/unit/pipelex/core/test_data/interpreter_test_cases.py
@@ -6,7 +6,7 @@
 from tests.unit.pipelex.core.test_data.concepts.simple_concepts import SIMPLE_CONCEPT_TEST_CASES
 from tests.unit.pipelex.core.test_data.concepts.structured_concepts import STRUCTURED_CONCEPT_TEST_CASES
 from tests.unit.pipelex.core.test_data.domain.simple_domains import DOMAIN_TEST_CASES
-from tests.unit.pipelex.core.test_data.errors.invalid_plx import ERROR_TEST_CASES
+from tests.unit.pipelex.core.test_data.errors.invalid_mthds import ERROR_TEST_CASES
 from tests.unit.pipelex.core.test_data.pipes.controllers.batch.pipe_batch import PIPE_BATCH_TEST_CASES
 from tests.unit.pipelex.core.test_data.pipes.controllers.condition.pipe_condition import PIPE_CONDITION_TEST_CASES
 from tests.unit.pipelex.core.test_data.pipes.controllers.parallel.pipe_parallel import PIPE_PARALLEL_TEST_CASES
diff --git a/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py b/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py
index e61cf87fd..239897614 100644
--- a/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py
+++ b/tests/unit/pipelex/pipe_operators/pipe_compose/test_construct_blueprint.py
@@ -1,6 +1,6 @@
 """Unit tests for ConstructBlueprint - the container for field blueprints.
 
-ConstructBlueprint is parsed from the `[pipe.name.construct]` section in PLX files.
+ConstructBlueprint is parsed from the `[pipe.name.construct]` section in MTHDS files.
""" from typing import Any, ClassVar diff --git a/tests/unit/pipelex/tools/test_jinja2_required_variables.py b/tests/unit/pipelex/tools/test_jinja2_required_variables.py index 95f81b774..54b7c87eb 100644 --- a/tests/unit/pipelex/tools/test_jinja2_required_variables.py +++ b/tests/unit/pipelex/tools/test_jinja2_required_variables.py @@ -181,19 +181,19 @@ class TestData: ), ] - PLX_STYLE_TEMPLATES: ClassVar[list[tuple[str, str, set[str]]]] = [ + MTHDS_STYLE_TEMPLATES: ClassVar[list[tuple[str, str, set[str]]]] = [ ( - "plx_at_variable_preprocessed", + "mthds_at_variable_preprocessed", '{{ page.page_view|tag("page.page_view") }}', {"page.page_view"}, ), ( - "plx_dollar_variable_preprocessed", + "mthds_dollar_variable_preprocessed", "{{ page.text_and_images.text.text|format() }}", {"page.text_and_images.text.text"}, ), ( - "plx_mixed_preprocessed", + "mthds_mixed_preprocessed", '{{ page.page_view|tag("page.page_view") }}\n{{ page.text_and_images.text.text|format() }}', {"page.page_view", "page.text_and_images.text.text"}, ), @@ -343,15 +343,15 @@ def test_optional_variables( @pytest.mark.parametrize( ("topic", "template_source", "expected_paths"), - TestData.PLX_STYLE_TEMPLATES, + TestData.MTHDS_STYLE_TEMPLATES, ) - def test_plx_style_templates( + def test_mthds_style_templates( self, topic: str, template_source: str, expected_paths: set[str], ): - """Test detection in PLX-style preprocessed templates with tag/format filters.""" + """Test detection in MTHDS-style preprocessed templates with tag/format filters.""" result = detect_jinja2_required_variables( template_category=TemplateCategory.LLM_PROMPT, template_source=template_source, @@ -647,7 +647,7 @@ def test_same_variable_multiple_times_combines_filters(self) -> None: assert "upper" in result[0].filters def test_format_filter_detected(self) -> None: - """Test that format filter (common in PLX templates) is detected.""" + """Test that format filter (common in MTHDS templates) is detected.""" result = detect_jinja2_variable_references( template_category=TemplateCategory.LLM_PROMPT, template_source="{{ content|format() }}", From ada1bb2a86ec0343f1e3724d090f591a807bd6bd Mon Sep 17 00:00:00 2001 From: Louis Choquel Date: Wed, 11 Feb 2026 19:23:59 +0100 Subject: [PATCH 3/5] Replace "workflow" with "method" in Pipelex-specific contexts Update remaining instances where "workflow" referred to Pipelex executable methods in docstrings, error messages, and project docs. Generic programming usage of "workflow" is left unchanged. Co-Authored-By: Claude Opus 4.6 --- CLAUDE.md | 2 +- pipelex/builder/bundle_spec.py | 2 +- pipelex/builder/pipe/pipe_sequence_spec.py | 2 +- pipelex/cogt/models/model_deck.py | 4 ++-- pipelex/kit/agent_rules/pytest_standards.md | 2 +- pipelex/system/telemetry/otel_constants.py | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 2603f071f..5642d5262 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -249,7 +249,7 @@ NEVER EVER put more than one TestClass into a test module. - Place test files in the appropriate test category directory: - `tests/unit/` - for unit tests that test individual functions/classes in isolation - `tests/integration/` - for integration tests that test component interactions - - `tests/e2e/` - for end-to-end tests that test complete workflows + - `tests/e2e/` - for end-to-end tests that test complete methods - Do NOT add `__init__.py` files to test directories. Test directories do not need to be Python packages. 
 - Fixtures are defined in conftest.py modules at different levels of the hierarchy, their scope is handled by pytest
 - Test data is placed inside test_data.py at different levels of the hierarchy, they must be imported with package paths from the root like `from tests.integration.pipelex.cogt.test_data`. Their content is all constants, regrouped inside classes to keep things tidy.
diff --git a/pipelex/builder/bundle_spec.py b/pipelex/builder/bundle_spec.py
index d3103e838..4dc565355 100644
--- a/pipelex/builder/bundle_spec.py
+++ b/pipelex/builder/bundle_spec.py
@@ -22,7 +22,7 @@ class PipelexBundleSpec(StructuredContent):
 
     Represents the top-level structure of a Pipelex bundle, which defines a domain
     with its concepts, pipes, and configuration. Bundles are the primary unit of
-    organization for Pipelex workflows, loaded from TOML files.
+    organization for Pipelex methods, loaded from TOML files.
 
     Attributes:
         domain: The domain identifier for this bundle in snake_case format.
diff --git a/pipelex/builder/pipe/pipe_sequence_spec.py b/pipelex/builder/pipe/pipe_sequence_spec.py
index a75187be1..7bf19d980 100644
--- a/pipelex/builder/pipe/pipe_sequence_spec.py
+++ b/pipelex/builder/pipe/pipe_sequence_spec.py
@@ -16,7 +16,7 @@ class PipeSequenceSpec(PipeSpec):
     """PipeSequenceSpec orchestrates the execution of multiple pipes in a defined
     order, where each pipe's output can be used as input for subsequent pipes. This enables
-    building complex data processing workflows with step-by-step transformations.
+    building powerful methods with step-by-step transformations.
     """
 
     type: SkipJsonSchema[Literal["PipeSequence"]] = "PipeSequence"
diff --git a/pipelex/cogt/models/model_deck.py b/pipelex/cogt/models/model_deck.py
index 6823efe75..dcc70550f 100644
--- a/pipelex/cogt/models/model_deck.py
+++ b/pipelex/cogt/models/model_deck.py
@@ -629,8 +629,8 @@ def _resolve_waterfall(
             msg = (
                 f"Inference model fallback: '{ideal_model_handle}' was not found in the model deck, "
                 f"so it was replaced by '{fallback}'. "
-                f"As a consequence, the results of the workflow may not have the expected quality, "
-                f"and the workflow might fail due to feature limitations such as context window size, etc. "
+                f"As a consequence, the results of the method may not have the expected quality, "
+                f"and the method might fail due to feature limitations such as context window size, etc. "
                 f"Consider getting access to '{ideal_model_handle}'."
             )
             enabled_backends = self._get_enabled_backends()
diff --git a/pipelex/kit/agent_rules/pytest_standards.md b/pipelex/kit/agent_rules/pytest_standards.md
index a37ff9f44..e7c6a1b41 100644
--- a/pipelex/kit/agent_rules/pytest_standards.md
+++ b/pipelex/kit/agent_rules/pytest_standards.md
@@ -11,7 +11,7 @@ NEVER EVER put more than one TestClass into a test module.
 - Place test files in the appropriate test category directory:
   - `tests/unit/` - for unit tests that test individual functions/classes in isolation
   - `tests/integration/` - for integration tests that test component interactions
-  - `tests/e2e/` - for end-to-end tests that test complete workflows
+  - `tests/e2e/` - for end-to-end tests that test complete methods
 - Do NOT add `__init__.py` files to test directories. Test directories do not need to be Python packages.
 - Fixtures are defined in conftest.py modules at different levels of the hierarchy, their scope is handled by pytest
 - Test data is placed inside test_data.py at different levels of the hierarchy, they must be imported with package paths from the root like `from tests.integration.pipelex.cogt.test_data`. Their content is all constants, regrouped inside classes to keep things tidy.
diff --git a/pipelex/system/telemetry/otel_constants.py b/pipelex/system/telemetry/otel_constants.py
index 4e934e16e..69c314719 100644
--- a/pipelex/system/telemetry/otel_constants.py
+++ b/pipelex/system/telemetry/otel_constants.py
@@ -108,7 +108,7 @@ def make_otel_gen_ai_output_type(output_type: str) -> otel_gen_ai_attributes.Gen
 
 
 class PipelexSpanAttr(StrEnum):
-    """Pipelex-specific span attribute keys for workflow tracing."""
+    """Pipelex-specific span attribute keys for method tracing."""
 
     TRACE_NAME = "pipelex.trace.name"
     TRACE_NAME_REDACTED = "pipelex.trace.name.redacted"

From 82062f471b079bec0a1d5ef60415e2825cff7b80 Mon Sep 17 00:00:00 2001
From: Louis Choquel
Date: Wed, 11 Feb 2026 20:14:45 +0100
Subject: [PATCH 4/5] Rename "pipeline" to "method" in mkdocs nav and README link text

Co-Authored-By: Claude Opus 4.6
---
 README.md  | 4 ++--
 mkdocs.yml | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index c72ad64d4..1994db69b 100644
--- a/README.md
+++ b/README.md
@@ -331,8 +331,8 @@ Each pipe processes information using **Concepts** (typing with meaning) to ensu
 
 **Learn More:**
 
-- [Design and Run Pipelines](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples
-- [Kick off a Pipeline Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-methods-project/) - Deep dive into Pipelex
+- [Design and Run Methods](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/pipes/) - Complete guide with examples
+- [Kick off a Method Project](https://docs.pipelex.com/pre-release/home/6-build-reliable-ai-workflows/kick-off-a-methods-project/) - Deep dive into Pipelex
 - [Configure AI Providers](https://docs.pipelex.com/pre-release/home/5-setup/configure-ai-providers/) - Set up AI providers and models
 
 ## 🔧 IDE Extension
diff --git a/mkdocs.yml b/mkdocs.yml
index c1b33f9b8..c9f38de90 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -131,7 +131,7 @@ nav:
           - Python classes: home/6-build-reliable-ai-workflows/concepts/python-classes.md
           - Native Concepts: home/6-build-reliable-ai-workflows/concepts/native-concepts.md
           - Refining Concepts: home/6-build-reliable-ai-workflows/concepts/refining-concepts.md
-      - Design and Run Pipelines:
+      - Design and Run Methods:
           - Overview: home/6-build-reliable-ai-workflows/pipes/index.md
           - Libraries: home/6-build-reliable-ai-workflows/libraries.md
           - Executing Pipelines: home/6-build-reliable-ai-workflows/pipes/executing-pipelines.md

From 2a6779e356607160a81d520e8ce51d826d7784ee Mon Sep 17 00:00:00 2001
From: Louis Choquel
Date: Thu, 12 Feb 2026 17:32:11 +0100
Subject: [PATCH 5/5] Rename parallel graph test bundles from .plx to .mthds extension

Co-Authored-By: Claude Opus 4.6
---
 .../{parallel_graph_3branch.plx => parallel_graph_3branch.mthds}   | 0
 ...{parallel_graph_add_each.plx => parallel_graph_add_each.mthds}  | 0
 ...{parallel_graph_combined.plx => parallel_graph_combined.mthds}  | 0
 3 files changed, 0 insertions(+), 0 deletions(-)
 rename tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/{parallel_graph_3branch.plx => parallel_graph_3branch.mthds} (100%)
 rename tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/{parallel_graph_add_each.plx => parallel_graph_add_each.mthds} (100%)
 rename tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/{parallel_graph_combined.plx => parallel_graph_combined.mthds} (100%)

diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.mthds
similarity index 100%
rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.plx
rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_3branch.mthds
diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.mthds
similarity index 100%
rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.plx
rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_add_each.mthds
diff --git a/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.plx b/tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.mthds
similarity index 100%
rename from tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.plx
rename to tests/e2e/pipelex/pipes/pipe_controller/pipe_parallel/parallel_graph_combined.mthds
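
For readers updating their own call sites against this series, a minimal usage sketch of the renamed interpreter API. This is not part of the patches: the script, the sample.mthds file, and the printed messages are hypothetical; only the imported names, the class method, and the mthds_content= keyword come from the diffs above.

    from pathlib import Path

    from pipelex.core.interpreter.interpreter import MthdsDecodeError, PipelexInterpreter

    # Read a MTHDS bundle (formerly a .plx file) and hand its text to the interpreter.
    mthds_content = Path("sample.mthds").read_text(encoding="utf-8")

    try:
        # Keyword renamed in this series: plx_content= -> mthds_content=
        blueprint = PipelexInterpreter.make_pipelex_bundle_blueprint(mthds_content=mthds_content)
    except MthdsDecodeError as exc:  # formerly PLXDecodeError
        print(f"Invalid MTHDS syntax: {exc}")
    else:
        print(blueprint)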