diff --git a/README.md b/README.md
index ef3cdad..e344788 100644
--- a/README.md
+++ b/README.md
@@ -746,6 +746,7 @@ Options:
 - `--output LOCATION`: Specify where to save the generated code. Supports `${VAR}`/`$VAR` expansion from `-e/--env`. The default file name is `.`. If an environment variable `PDD_GENERATE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.
 - `--original-prompt FILENAME`: The original prompt file used to generate the existing code. If not specified, the command automatically uses the last committed version of the prompt file from git.
 - `--incremental`: Force incremental patching even if changes are significant. This option is only valid when an output location is specified and the file exists.
+- `--unit-test FILENAME`: Path to a unit test file. If provided, the content of this file is appended to the prompt, instructing the model to generate code that passes the specified tests.
 
 **Parameter Variables (-e/--env)**: Pass key=value pairs to parameterize a prompt so one prompt can generate multiple variants (e.g., multiple files) by invoking `generate` repeatedly with different values.
 
diff --git a/pdd/cli.py b/pdd/cli.py
index 857d410..b19e796 100644
--- a/pdd/cli.py
+++ b/pdd/cli.py
@@ -700,6 +700,13 @@ def collect_usage_pieces(self, ctx: click.Context) -> List[str]:
     default=None,
     help="Use a packaged/project template by name (e.g., architecture/architecture_json)",
 )
+@click.option(
+    "--unit-test",
+    "unit_test_file",
+    type=click.Path(exists=True, dir_okay=False),
+    default=None,
+    help="Path to a unit test file to include in the prompt.",
+)
 @click.pass_context
 @track_cost
 def generate(
@@ -710,6 +717,7 @@ def generate(
     incremental_flag: bool,
     env_kv: Tuple[str, ...],
     template_name: Optional[str],
+    unit_test_file: Optional[str],
 ) -> Optional[Tuple[str, float, str]]:
     """
     Generate code from a prompt file.
@@ -766,6 +774,7 @@ def generate(
             original_prompt_file_path=original_prompt_file_path,
             force_incremental_flag=incremental_flag,
             env_vars=env_vars or None,
+            unit_test_file=unit_test_file,
         )
         return generated_code, total_cost, model_name
     except Exception as exception:
diff --git a/pdd/code_generator_main.py b/pdd/code_generator_main.py
index 364c20c..7993961 100644
--- a/pdd/code_generator_main.py
+++ b/pdd/code_generator_main.py
@@ -187,6 +187,7 @@ def code_generator_main(
     original_prompt_file_path: Optional[str],
     force_incremental_flag: bool,
     env_vars: Optional[Dict[str, str]] = None,
+    unit_test_file: Optional[str] = None,
 ) -> Tuple[str, bool, float, str]:
     """
     CLI wrapper for generating code from prompts. Handles full and incremental generation,
@@ -223,6 +224,20 @@ def code_generator_main(
             prompt_content = body
     else:
         prompt_content = raw_prompt_content
+
+    if unit_test_file:
+        try:
+            with open(unit_test_file, 'r', encoding='utf-8') as f:
+                unit_test_content = f.read()
+
+            prompt_content += "\n\n<unit_test>\n"
+            prompt_content += "The following is the unit test content that the generated code must pass:\n"
+            prompt_content += "```\n"
+            prompt_content += unit_test_content
+            prompt_content += "\n```\n"
+            prompt_content += "</unit_test>\n"
+        except Exception as e:
+            console.print(f"[yellow]Warning: Could not read unit test file {unit_test_file}: {e}[/yellow]")
 
     # Determine LLM state early to avoid unnecessary overwrite prompts
     llm_enabled: bool = True
diff --git a/tests/test_code_generator_main.py b/tests/test_code_generator_main.py
index 644dba0..914af99 100644
--- a/tests/test_code_generator_main.py
+++ b/tests/test_code_generator_main.py
@@ -1563,3 +1563,105 @@ def render_side_effect(cmd, *args, **kwargs):
     expected = json.dumps(unformatted_entries, indent=2) + "\n"
     actual = output_path.read_text(encoding="utf-8")
     assert actual == expected
+
+
+def test_full_gen_local_with_unit_test(
+    mock_ctx, temp_dir_setup, mock_construct_paths_fixture, mock_local_generator_fixture, mock_env_vars
+):
+    mock_ctx.obj['local'] = True
+    prompt_file_path = temp_dir_setup["prompts_dir"] / "unit_test_prompt.prompt"
+    prompt_content = "Generate code that passes the test."
+    create_file(prompt_file_path, prompt_content)
+
+    unit_test_file = temp_dir_setup["tmp_path"] / "test_something.py"
+    unit_test_content = "def test_hello(): assert True"
+    create_file(unit_test_file, unit_test_content)
+
+    output_file_path_str = str(temp_dir_setup["output_dir"] / "output_with_test.py")
+
+    mock_construct_paths_fixture.return_value = (
+        {},
+        {"prompt_file": prompt_content},
+        {"output": output_file_path_str},
+        "python"
+    )
+
+    code_generator_main(
+        mock_ctx,
+        str(prompt_file_path),
+        output_file_path_str,
+        None,
+        False,
+        unit_test_file=str(unit_test_file)
+    )
+
+    called_kwargs = mock_local_generator_fixture.call_args.kwargs
+    called_prompt = called_kwargs["prompt"]
+
+    assert prompt_content in called_prompt
+    # Unit test content should now be wrapped in <unit_test> tags
+    assert "<unit_test>" in called_prompt
+    assert unit_test_content in called_prompt
+    assert "</unit_test>" in called_prompt
+
+
+def test_full_gen_local_with_unit_test_and_front_matter_conflict(
+    mock_ctx, temp_dir_setup, mock_construct_paths_fixture, mock_local_generator_fixture, mock_env_vars
+):
+    """
+    Ensure that a unit test file starting with '---' does not interfere with
+    the prompt's front matter parsing, and that injection happens after parsing.
+    """
+    mock_ctx.obj['local'] = True
+
+    # Prompt with front matter
+    prompt_file_path = temp_dir_setup["prompts_dir"] / "conflict_prompt.prompt"
+    prompt_body = "This is the main prompt body."
+ prompt_content = f"""--- +language: json +--- +{prompt_body} +""" + create_file(prompt_file_path, prompt_content) + + # Unit test file that looks like it has front matter + unit_test_file = temp_dir_setup["tmp_path"] / "test_conflict.py" + unit_test_content = """--- +this: looks +like: frontmatter +--- +def test_conflict(): pass +""" + create_file(unit_test_file, unit_test_content) + + output_file_path_str = str(temp_dir_setup["output_dir"] / "conflict_output.json") + + mock_construct_paths_fixture.return_value = ( + {}, + {"prompt_file": prompt_content}, + {"output": output_file_path_str}, + "json" + ) + + code_generator_main( + mock_ctx, + str(prompt_file_path), + output_file_path_str, + None, + False, + unit_test_file=str(unit_test_file) + ) + + called_kwargs = mock_local_generator_fixture.call_args.kwargs + + # Verify metadata from front matter was respected + assert called_kwargs["language"] == "json" + + # Verify prompt content + called_prompt = called_kwargs["prompt"] + assert prompt_body in called_prompt + assert "" in called_prompt + assert unit_test_content in called_prompt + assert "" in called_prompt + # Ensure the prompt's front matter is NOT in the final prompt passed to generator + assert "language: json" not in called_prompt