@@ -60,7 +60,7 @@
 from paperqa.docs import Docs
 from paperqa.prompts import CANNOT_ANSWER_PHRASE, CONTEXT_INNER_PROMPT_NOT_DETAILED
 from paperqa.settings import AgentSettings, IndexSettings, Settings
-from paperqa.types import Context, Doc, PQASession, Text
+from paperqa.types import Context, Doc, DocDetails, PQASession, Text
 from paperqa.utils import encode_id, extract_thought, get_year, md5sum
 
 
@@ -521,15 +521,22 @@ async def test_propagate_options(agent_test_settings: Settings) -> None:
     agent_test_settings.prompts.context_inner = CONTEXT_INNER_PROMPT_NOT_DETAILED
     agent_test_settings.answer.evidence_skip_summary = True
 
+    docs = Docs()
     response = await agent_query(
         query="What is a self-explanatory model?",
         settings=agent_test_settings,
+        docs=docs,
         agent_type=FAKE_AGENT_TYPE,
     )
     assert response.status == AgentStatus.SUCCESS, "Agent did not succeed"
     result = response.session
     assert len(result.answer) > 200, "Answer did not return any results"
     assert "###" in result.answer, "Answer did not propagate system prompt"
+    assert docs.docs, "Expected docs to have been added"
+    assert all(isinstance(d, DocDetails) for d in docs.docs.values())
+    assert all(
+        d.file_location for d in docs.docs.values()  # type: ignore[union-attr]
+    ), "Expected file location to be populated"
     assert len(result.contexts) >= 2, "Test expects a few contexts"
     # Subtract 2 to allow tolerance for chunks with leading/trailing whitespace
     num_contexts_sufficient_length = sum(
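
For context, the pattern this test now covers looks like the following when used directly: the caller constructs a Docs container, passes it to agent_query, and inspects the gathered documents afterward. This is a minimal sketch, not taken from the diff itself: the agent_query import path, the bare Settings(), and running via asyncio.run are assumptions.

import asyncio

from paperqa.agents import agent_query  # import path assumed
from paperqa.docs import Docs
from paperqa.settings import Settings
from paperqa.types import DocDetails


async def main() -> None:
    # Caller-owned container; agent_query populates it during the run.
    docs = Docs()
    response = await agent_query(
        query="What is a self-explanatory model?",
        settings=Settings(),
        docs=docs,
    )
    print(response.session.answer)
    # Per the new assertions in the test, each gathered doc should be a
    # DocDetails with its file_location populated.
    for doc in docs.docs.values():
        if isinstance(doc, DocDetails):
            print(doc.file_location)


asyncio.run(main())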