diff --git a/.cursor/plans/cms-opens-e29ee941.plan.md b/.cursor/plans/cms-opens-e29ee941.plan.md deleted file mode 100644 index f6da6ae..0000000 --- a/.cursor/plans/cms-opens-e29ee941.plan.md +++ /dev/null @@ -1,246 +0,0 @@ - -# Make Dream Minimal - Clean MVC Architecture - -## Goal - -Transform Dream into minimal routing toolkit, extract all utilities to modules, validate clean MVC (no anonymous functions, no nested cases) with CMS example. - -## Phase 1: Extract to Modules - -### Dream Core Keeps - -- `src/dream/router.gleam` -- `src/dream/http/transaction.gleam` -- `src/dream/http/method.gleam` -- `src/dream/context.gleam` -- `src/dream/servers/mist/` - -### Dream Core Deletes - -- `src/dream/http/statuses.gleam` -- `src/dream/core/singleton.gleam` -- `src/dream/validators/` -- `src/dream/services/` -- `src/dream/utilities/` - -### Create 6 Modules - -- `modules/helpers/` → dream_helpers (statuses, validators, response builders) -- `modules/singleton/` → dream_singleton (generic singleton) -- `modules/config/` → dream_config (dotenv loading) -- `modules/postgres/` → dream_postgres (query helpers, singleton) -- `modules/http_client/` → dream_http_client (HTTP client) -- `modules/opensearch/` → dream_opensearch (document store) - -## Phase 2: Update 7 Existing Examples - -All examples add `dream_helpers` and update imports. 
- -Specific updates: - -- database → add dream_postgres -- streaming → add dream_http_client -- singleton → add dream_singleton - -## Phase 3: Create CMS Example - -### File Structure - -``` -examples/cms/ -├── docker-compose.yml -├── gleam.toml -├── Makefile -├── README.md -├── .env.example -├── priv/migrations/ -├── src/ -│ ├── main.gleam -│ ├── router.gleam -│ ├── context.gleam -│ ├── services.gleam -│ ├── config.gleam -│ ├── types/ -│ │ ├── user.gleam -│ │ ├── post.gleam -│ │ ├── event.gleam -│ │ └── errors.gleam -│ ├── models/ -│ │ ├── user/ -│ │ │ ├── user.gleam -│ │ │ ├── sql.gleam -│ │ │ └── sql/*.sql -│ │ ├── post/ -│ │ │ ├── post.gleam -│ │ │ ├── sql.gleam -│ │ │ └── sql/*.sql -│ │ └── event/ -│ │ └── event.gleam -│ ├── views/ -│ │ ├── user_view.gleam -│ │ ├── post_view.gleam -│ │ └── event_view.gleam -│ ├── controllers/ -│ │ ├── users_controller.gleam -│ │ ├── posts_controller.gleam -│ │ └── events_controller.gleam -│ ├── operations/ -│ │ ├── publish_post.gleam -│ │ ├── export_posts.gleam -│ │ └── enrich_events.gleam -│ └── middleware/ -│ └── logging_middleware.gleam -``` - -### Makefile - -```makefile -migrate-up: - gleam run -m cigogne -- migrate up - -migrate-down: - gleam run -m cigogne -- migrate down - psql postgresql://postgres:postgres@localhost:5435/cms_db -c "ALTER SEQUENCE cigogne_migrations_id_seq RESTART WITH 1;" - -migrate-new: - gleam run -m cigogne -- migrate create $(name) -``` - -### Clean Pattern Examples (No Anonymous Functions, No Nested Cases) - -**operations/enrich_events.gleam:** - -```gleam -pub fn execute(services: Services, limit: Int) -> Result(List(EnrichedEvent), DataError) { - use events <- result.try(event.recent(services.opensearch, limit)) - Ok(enrich_all_events(events, services)) -} - -fn enrich_all_events(events: List(Event), services: Services) -> List(EnrichedEvent) { - list.map(events, enrich_single_event(_, services)) -} - -fn enrich_single_event(evt: Event, services: Services) -> EnrichedEvent { - 
EnrichedEvent(event: evt, user: load_user_if_present(evt, services)) -} - -fn load_user_if_present(evt: Event, services: Services) -> Option(User) { - case evt.user_id { - option.Some(id) -> user.get(services.db, id) |> result.to_option() - option.None -> option.None - } -} -``` - -**operations/export_posts.gleam:** - -```gleam -pub fn execute(services: Services) -> Result(yielder.Yielder(BitArray), DataError) { - use posts <- result.try(post.list(services.db)) - Ok(create_csv_stream(posts)) -} - -fn create_csv_stream(posts: List(Post)) -> yielder.Yielder(BitArray) { - let header = "id,title,author_id,status\n" - let rows = list.map(posts, post_to_csv) - - yielder.from_list([header, ..rows]) - |> yielder.map(string_to_bits) -} - -fn post_to_csv(post: Post) -> String { - int.to_string(post.id) <> "," <> post.title <> "," - <> int.to_string(post.author_id) <> "," - <> status_to_string(post.status) <> "\n" -} - -fn string_to_bits(s: String) -> BitArray { - <> -} -``` - -**controllers/events_controller.gleam:** - -```gleam -pub fn stream(_request: Request, _context: Context, services: Services) -> Response { - let stream = create_event_stream(services) - sse_response(ok_status(), stream, "text/event-stream") -} - -fn create_event_stream(services: Services) -> yielder.Yielder(BitArray) { - yielder.repeatedly(poll_events(_, services)) - |> yielder.flatten() - |> yielder.map(string_to_bits) -} - -fn poll_events(_: Nil, services: Services) -> List(String) { - process.sleep(1000) - fetch_and_format_events(services) -} - -fn fetch_and_format_events(services: Services) -> List(String) { - case event.recent(services.opensearch, 10) { - Ok(events) -> format_all_events_as_sse(events) - Error(_) -> [] - } -} - -fn format_all_events_as_sse(events: List(Event)) -> List(String) { - list.map(events, format_single_event_as_sse) -} - -fn format_single_event_as_sse(evt: Event) -> String { - "data: " <> event_view.to_json(evt) <> "\n\n" -} - -fn string_to_bits(s: String) -> BitArray { - 
<> -} -``` - -**views/event_view.gleam:** - -```gleam -pub fn to_json(event: Event) -> String { - to_json_object(event) - |> json.to_string() -} - -pub fn list_to_json(events: List(Event)) -> String { - events - |> list.map(to_json_object) - |> json.array(from: _, of: identity) - |> json.to_string() -} - -fn to_json_object(event: Event) -> json.Json { - json.object([ - #("id", json.string(event.id)), - #("event_type", json.string(event_type_to_string(event.event_type))), - #("method", json.string(event.method)), - #("path", json.string(event.path)), - #("status_code", json.int(event.status_code)), - ]) -} - -fn identity(x) -> x { - x -} -``` - -## Phase 4: Documentation - -**docs/reference/architecture.md:** - -Comprehensive MVC explanation with modules ecosystem. - -**docs/guides/controllers-and-models.md:** - -Updated patterns using dream_postgres, showing clean style. - -## Success Criteria - -1. Dream core minimal (no service code) -2. 6 modules as independent packages -3. All 8 examples work -4. CMS validates architecture with clean code (no anon functions, no nested cases) \ No newline at end of file diff --git a/.cursor/plans/dream-ets-module-replacement-88c3efed.plan.md b/.cursor/plans/dream-ets-module-replacement-88c3efed.plan.md deleted file mode 100644 index 20b26cc..0000000 --- a/.cursor/plans/dream-ets-module-replacement-88c3efed.plan.md +++ /dev/null @@ -1,603 +0,0 @@ - -# Create dream_ets Module - Dream Quality Standards - -## Overview - -Build a comprehensive ETS (Erlang Term Storage) wrapper module `dream_ets` that exemplifies Dream's quality standards. This module will serve as a reference implementation demonstrating how to build high-quality Dream modules. - -## Dream Quality Standards (MANDATORY) - -### Code Quality - -1. **No Anonymous Functions** - All functions must be explicitly named. No `fn()` closures in public API. -2. **No Nested Cases** - Each case statement must be in a separate named function. 
Complex logic split into helper functions. -3. **Builder Pattern** - Mandatory for all table creation. Consistent with `dream_postgres`, `dream_http_client`. -4. **Type Safety First** - Leverage Gleam's type system fully. Make invalid states unrepresentable. -5. **Explicit Over Implicit** - No magic, no hidden behavior. Everything visible in code. -6. **No Closures** - All dependencies explicit in function signatures. - -### Testing Standards - -1. **100% Test Coverage** - Every public function must have tests. -2. **Black Box Testing** - Test public interfaces only, not implementation details. -3. **AAA Pattern** - Arrange, Act, Assert with blank lines between sections. -4. **Test Naming** - `___test()` -5. **No External Dependencies** - Tests must be isolated, fast, deterministic. -6. **Edge Cases** - Test error paths, boundary conditions, invalid inputs. - -### Documentation Standards - -1. **All Public Functions Documented** - Every public function has doc comments. -2. **Examples Required** - All documentation includes usage examples with imports. -3. **Builder Pattern Examples** - All examples show builder usage prominently. -4. **Clear and Concise** - Documentation explains what, why, and how. - -### Code Organization - -1. **Small, Focused Functions** - Each function does one thing well. -2. **Composability** - Functions work together, can be used independently. -3. **Consistent Naming** - Follow `{verb}_{noun}` pattern, no module prefixes. -4. **Simple Over Clever** - Code should be obvious, not clever. 
- -## Phase 1: Module Structure Setup - -### 1.1 Create Module Directory Structure - -**Location:** `modules/ets/` - -**Files to create:** - -- `gleam.toml` - Package configuration -- `manifest.toml` - Manifest file (auto-generated) -- `Makefile` - Build/test commands (mirror other modules) -- `README.md` - Comprehensive documentation emphasizing quality standards -- `src/dream_ets/` - Source directory - - `config.gleam` - Builder configuration (MANDATORY builder pattern) - - `table.gleam` - Table type and core operations - - `operations.gleam` - All ETS operations (no nested cases) - - `encoders.gleam` - Built-in encoders/decoders (all named functions) - - `helpers.gleam` - Convenience helpers (use builder internally) - - `internal.gleam` - Internal FFI wrappers (no public API) - - `internal_ffi.erl` - Erlang FFI (pure wrappers, no logic) -- `test/dream_ets_test.gleam` - Test entry point - - `config_test.gleam` - Builder pattern tests (AAA pattern) - - `table_test.gleam` - Table operations tests - - `operations_test.gleam` - Advanced operations tests - - `encoders_test.gleam` - Encoder/decoder tests - - `helpers_test.gleam` - Helper function tests - -### 1.2 Dependencies - -**gleam.toml:** - -```toml -name = "dream_ets" -version = "0.1.0" - -[dependencies] -gleam_stdlib = ">= 0.44.0" -gleam_erlang = ">= 1.0.0" -gleam_json = ">= 2.2.0" -gleam_dynamic = ">= 1.0.0" - -[dev-dependencies] -gleeunit = ">= 1.0.0" -``` - -## Phase 2: Core Implementation (Following Quality Standards) - -### 2.1 FFI Layer (`internal_ffi.erl`) - -**Quality Standard: Pure wrappers, no logic** - -All FFI functions are direct Erlang ETS calls with no business logic: - -- `ets_new/2` - Create table with options -- `ets_insert/2` - Insert objects -- `ets_lookup/2` - Lookup by key -- `ets_delete/2` - Delete table -- `ets_delete_object/2` - Delete specific object -- `ets_delete_all_objects/1` - Clear table -- `ets_first/1` - Get first key -- `ets_next/2` - Get next key -- `ets_match/2` - Pattern 
matching -- `ets_match_object/2` - Match objects -- `ets_select/2` - Select with match spec -- `ets_tab2file/2` - Save to file -- `ets_file2tab/1` - Load from file -- `ets_info/1` - Get table info -- `ets_update_element/3` - Update element in tuple -- `ets_insert_new/2` - Insert only if new -- `ets_take/2` - Lookup and delete -- `ets_member/2` - Check membership - -**Quality Check:** Each function is a single Erlang call, no conditional logic. - -### 2.2 Builder Configuration (`config.gleam`) - -**Quality Standards:** - -- Builder pattern MANDATORY (no direct table creation) -- All functions explicitly named -- No nested cases -- Type-safe at each step - -**Type:** - -```gleam -pub opaque type TableConfig(k, v) { - TableConfig( - name: String, - table_type: TableType, - access: Access, - keypos: Int, - read_concurrency: Bool, - write_concurrency: Bool, - compressed: Bool, - named_table: Bool, - key_encoder: Option(fn(k) -> Dynamic), - key_decoder: Option(Decoder(k)), - value_encoder: Option(fn(v) -> Dynamic), - value_decoder: Option(Decoder(v)), - ) -} - -pub type TableType { - Set - OrderedSet - Bag - DuplicateBag -} - -pub type Access { - Public - Protected - Private -} -``` - -**Builder Functions (All Named, No Nesting):** - -- `new(name: String) -> TableConfig(k, v)` - Start builder with defaults -- `table_type(config, type_) -> TableConfig` - Set table type -- `access(config, access) -> TableConfig` - Set access mode -- `keypos(config, pos) -> TableConfig` - Set key position -- `read_concurrency(config, enabled) -> TableConfig` - Enable concurrent reads -- `write_concurrency(config, enabled) -> TableConfig` - Enable concurrent writes -- `compressed(config, enabled) -> TableConfig` - Compress table data -- `key(config, encoder, decoder) -> TableConfig` - Set key encoding -- `value(config, encoder, decoder) -> TableConfig` - Set value encoding -- `key_string(config) -> TableConfig(String, v)` - Convenience for string keys -- `value_string(config) -> 
TableConfig(k, String)` - Convenience for string values -- `value_json(config, to_json, decoder) -> TableConfig` - JSON serialization -- `counter(config) -> TableConfig(String, Int)` - Counter table shortcut -- `create(config) -> Result(Table(k, v), EtsError)` - Create table - -**Quality Check:** Each function is a single flat update, no nested cases or conditionals. - -**Example Usage (All Documentation Must Show This):** - -```gleam -// Simple table with defaults -let assert Ok(table) = ets.new("my_table") - |> ets.create() - -// Configured table -let assert Ok(table) = ets.new("sessions") - |> ets.key_string() - |> ets.value_json(session.to_json, session.decoder()) - |> ets.read_concurrency(True) - |> ets.create() -``` - -### 2.3 Table Type (`table.gleam`) - -**Quality Standards:** - -- Opaque type hiding implementation -- Encapsulates encoding/decoding -- Type-safe operations - -**Type:** - -```gleam -pub opaque type Table(k, v) { - Table( - table_ref: EtsTableRef, - name: String, - key_encoder: fn(k) -> Dynamic, - key_decoder: Decoder(k), - value_encoder: fn(v) -> Dynamic, - value_decoder: Decoder(v), - ) -} - -pub type EtsError { - TableNotFound - TableAlreadyExists - InvalidKey - InvalidValue - DecodeError(dynamic.DecodeError) - EncodeError(String) - OperationFailed(String) -} -``` - -### 2.4 Core Operations (`operations.gleam`) - -**Quality Standards:** - -- No nested cases - each operation is a separate named function -- Encoding/decoding in separate helper functions -- Explicit error handling - -**Pattern for All Operations:** - -1. Encode key/value (separate helper function) -2. Call FFI (separate helper function) -3. Decode result (separate helper function) -4. 
Return Result - -**Basic Operations:** - -- `set(table, key, value) -> Result(Nil, EtsError)` - Insert/update -- `get(table, key) -> Result(Option(v), EtsError)` - Lookup -- `delete(table, key) -> Result(Nil, EtsError)` - Delete key -- `member(table, key) -> Bool` - Check membership -- `delete_table(table) -> Result(Nil, EtsError)` - Delete entire table -- `size(table) -> Int` - Get table size -- `keys(table) -> List(k)` - Get all keys -- `values(table) -> List(v)` - Get all values -- `to_list(table) -> List(#(k, v))` - Convert to list - -**Advanced Operations:** - -- `insert_new(table, key, value) -> Result(Bool, EtsError)` - Insert only if new -- `take(table, key) -> Result(Option(v), EtsError)` - Lookup and delete -- `update_element(table, key, pos, value) -> Result(Nil, EtsError)` - Update tuple element -- `delete_all_objects(table) -> Result(Nil, EtsError)` - Clear table -- `match(table, pattern) -> List(v)` - Pattern matching -- `select(table, match_spec) -> List(v)` - Select with match spec - -**Quality Check:** Each function calls named helpers for encoding/decoding, no inline logic. - -### 2.5 Encoders (`encoders.gleam`) - -**Quality Standards:** - -- All functions explicitly named -- No anonymous functions -- Each encoder/decoder is a separate function - -**Built-in Encoders/Decoders:** - -- `string_encoder(s: String) -> Dynamic` -- `string_decoder() -> Decoder(String)` -- `int_encoder(i: Int) -> Dynamic` -- `int_decoder() -> Decoder(Int)` -- `bool_encoder(b: Bool) -> Dynamic` -- `bool_decoder() -> Decoder(Bool)` -- `float_encoder(f: Float) -> Dynamic` -- `float_decoder() -> Decoder(Float)` -- `json_encoder(to_json: fn(v) -> json.Json) -> fn(v) -> Dynamic` - Returns named function -- `json_decoder(decoder: Decoder(v)) -> Decoder(v)` - -**Quality Check:** No closures in public API. `json_encoder` returns a function, but it's a named return type. 
- -### 2.6 Helpers (`helpers.gleam`) - -**Quality Standards:** - -- All convenience functions MUST use builder internally -- No direct table creation bypassing builder -- Explicit function names - -**Convenience Functions:** - -- `new_counter(name: String) -> Result(Table(String, Int), EtsError)` - Uses builder internally -- `new_string_table(name: String) -> Result(Table(String, String), EtsError)` - Uses builder internally - -**Counter Operations:** - -- `increment(table, key) -> Result(Int, EtsError)` - Atomic increment -- `increment_by(table, key, amount) -> Result(Int, EtsError)` - Increment by amount -- `decrement(table, key) -> Result(Int, EtsError)` - Atomic decrement -- `decrement_by(table, key, amount) -> Result(Int, EtsError)` - Decrement by amount - -**Quality Check:** `new_counter()` implementation must show builder usage: - -```gleam -pub fn new_counter(name: String) -> Result(Table(String, Int), EtsError) { - new(name) - |> counter() - |> create() -} -``` - -## Phase 3: Comprehensive Testing (Following Testing Standards) - -### 3.1 Test Structure - -**Testing Standards:** - -- **100% Coverage** - Every public function tested -- **Black Box** - Test public interfaces only -- **AAA Pattern** - Arrange, Act, Assert with blank lines -- **Test Naming** - `___test()` -- **No External Dependencies** - Isolated, fast, deterministic - -### 3.2 Test Files - -**config_test.gleam - Builder Pattern Tests:** - -- `new_with_name_creates_config_with_defaults_test()` -- `table_type_sets_table_type_test()` -- `access_sets_access_mode_test()` -- `read_concurrency_enables_read_concurrency_test()` -- `write_concurrency_enables_write_concurrency_test()` -- `compressed_enables_compression_test()` -- `key_string_sets_string_key_encoding_test()` -- `value_string_sets_string_value_encoding_test()` -- `counter_creates_counter_config_test()` -- `create_with_valid_config_returns_table_test()` -- `create_with_duplicate_name_returns_error_test()` -- 
`builder_pattern_allows_chaining_test()` - -**table_test.gleam - Basic Operations:** - -- `set_with_valid_key_value_stores_value_test()` -- `set_with_existing_key_updates_value_test()` -- `get_with_existing_key_returns_value_test()` -- `get_with_nonexistent_key_returns_none_test()` -- `delete_with_existing_key_removes_key_test()` -- `delete_with_nonexistent_key_returns_error_test()` -- `member_with_existing_key_returns_true_test()` -- `member_with_nonexistent_key_returns_false_test()` -- `size_with_empty_table_returns_zero_test()` -- `size_with_three_items_returns_three_test()` -- `keys_with_empty_table_returns_empty_list_test()` -- `keys_with_items_returns_all_keys_test()` -- `values_with_empty_table_returns_empty_list_test()` -- `values_with_items_returns_all_values_test()` -- `to_list_with_empty_table_returns_empty_list_test()` -- `to_list_with_items_returns_all_pairs_test()` - -**operations_test.gleam - Advanced Operations:** - -- `insert_new_with_new_key_returns_true_test()` -- `insert_new_with_existing_key_returns_false_test()` -- `take_with_existing_key_returns_value_and_deletes_test()` -- `take_with_nonexistent_key_returns_none_test()` -- `update_element_updates_tuple_element_test()` -- `update_element_with_invalid_position_returns_error_test()` -- `delete_all_objects_clears_table_test()` -- `match_with_pattern_returns_matching_objects_test()` -- `match_with_no_matches_returns_empty_list_test()` -- `select_with_match_spec_returns_matching_objects_test()` - -**encoders_test.gleam - Encoder/Decoder Tests:** - -- `string_encoder_encodes_string_test()` -- `string_decoder_decodes_string_test()` -- `string_decoder_with_invalid_input_returns_error_test()` -- `int_encoder_encodes_int_test()` -- `int_decoder_decodes_int_test()` -- `int_decoder_with_invalid_input_returns_error_test()` -- `bool_encoder_encodes_bool_test()` -- `bool_decoder_decodes_bool_test()` -- `float_encoder_encodes_float_test()` -- `float_decoder_decodes_float_test()` -- 
`json_encoder_encodes_json_test()` -- `json_decoder_decodes_json_test()` -- `json_decoder_with_invalid_json_returns_error_test()` - -**helpers_test.gleam - Helper Functions:** - -- `new_counter_creates_counter_table_test()` -- `new_counter_uses_builder_internally_test()` - Verify builder usage -- `new_string_table_creates_string_table_test()` -- `new_string_table_uses_builder_internally_test()` - Verify builder usage -- `increment_with_new_key_sets_to_one_test()` -- `increment_with_existing_key_increments_value_test()` -- `increment_by_with_amount_adds_amount_test()` -- `increment_by_with_new_key_sets_to_amount_test()` -- `decrement_with_existing_key_decrements_value_test()` -- `decrement_with_new_key_sets_to_negative_one_test()` -- `decrement_by_with_amount_subtracts_amount_test()` - -**Table Type Tests:** - -- `set_table_stores_unique_keys_test()` -- `set_table_replaces_existing_key_test()` -- `ordered_set_table_maintains_order_test()` -- `bag_table_allows_duplicate_keys_test()` -- `bag_table_rejects_duplicate_objects_test()` -- `duplicate_bag_table_allows_duplicate_objects_test()` -- `public_table_allows_external_access_test()` -- `protected_table_allows_external_reads_test()` -- `protected_table_restricts_external_writes_test()` -- `private_table_restricts_external_access_test()` - -**Error Handling Tests:** - -- `get_with_invalid_key_returns_error_test()` -- `set_with_invalid_value_returns_error_test()` -- `delete_table_with_nonexistent_table_returns_error_test()` -- `decode_error_returns_decode_error_test()` -- `encode_error_returns_encode_error_test()` - -**Concurrency Tests:** - -- `multiple_processes_can_read_concurrently_test()` -- `read_concurrency_enables_concurrent_reads_test()` -- `write_concurrency_enables_concurrent_writes_test()` - -**Quality Check:** Every test follows AAA pattern with blank lines. All test names follow convention. 
- -## Phase 4: Documentation (Following Documentation Standards) - -### 4.1 Module README - -**File:** `modules/ets/README.md` - -**Must Include:** - -- Overview of ETS and why it's useful -- **Builder pattern examples (PRIMARY focus)** - Show builder usage prominently -- Type safety with encoders/decoders -- All table types explained with examples -- Common use cases (rate limiting, caching, sessions) with builder examples -- Complete API reference -- **Emphasize: Builder pattern is the only way to create tables** -- Quality standards section explaining code organization - -### 4.2 Code Documentation - -**Every Public Function Must Have:** - -- Brief description -- Example usage with imports -- Builder pattern examples where applicable -- Important notes or caveats - -**Example:** - -````gleam -/// Creates a new ETS table configuration with sensible defaults. -/// -/// The builder pattern is mandatory for table creation. Start with `new()`, -/// configure options, then call `create()` to finalize. 
-/// -/// ## Example -/// -/// ```gleam -/// import dream_ets as ets -/// -/// // Simple table with defaults -/// let assert Ok(table) = ets.new("my_table") -/// |> ets.create() -/// -/// // Configured table -/// let assert Ok(table) = ets.new("sessions") -/// |> ets.key_string() -/// |> ets.value_json(session.to_json, session.decoder()) -/// |> ets.read_concurrency(True) -/// |> ets.create() -/// ``` -pub fn new(name: String) -> TableConfig(k, v) { - // Implementation -} -```` - -## Phase 5: Quality Assurance - -### 5.1 Code Review Checklist - -**Code Quality:** - -- [ ] No anonymous functions in public API -- [ ] No nested cases - all split into named functions -- [ ] Builder pattern used for all table creation -- [ ] All functions explicitly named -- [ ] Type-safe throughout -- [ ] No magic, everything explicit - -**Testing:** - -- [ ] 100% test coverage of public functions -- [ ] All tests follow AAA pattern -- [ ] All test names follow convention -- [ ] Edge cases covered -- [ ] Error paths tested -- [ ] No external dependencies in tests - -**Documentation:** - -- [ ] All public functions documented -- [ ] All examples show builder pattern -- [ ] README comprehensive -- [ ] Code examples compile and run - -**Consistency:** - -- [ ] Follows naming conventions -- [ ] Consistent with other Dream modules -- [ ] Code formatted (`gleam format`) -- [ ] No linter errors - -### 5.2 Final Verification - -**Before Completion:** - -1. Run `gleam check` - No type errors -2. Run `gleam test` - All tests pass -3. Run `gleam format --check` - Code formatted -4. Review all public functions for documentation -5. Verify builder pattern in all examples -6. Check for anonymous functions -7. Check for nested cases -8. Verify test coverage - -## Success Criteria - -1. ✅ Module created following all Dream quality standards -2. ✅ Builder pattern mandatory - all table creation uses builder -3. ✅ No anonymous functions in public API -4. 
✅ No nested cases - all logic in named functions -5. ✅ 100% test coverage of public functions -6. ✅ All tests follow AAA pattern and naming convention -7. ✅ Comprehensive documentation with builder examples -8. ✅ Type-safe throughout -9. ✅ Explicit over implicit - no magic -10. ✅ Consistent with other Dream modules -11. ✅ Code formatted and linted -12. ✅ All quality checks pass - -## Files to Create - -**Create:** - -- `modules/ets/gleam.toml` -- `modules/ets/manifest.toml` (auto-generated) -- `modules/ets/Makefile` -- `modules/ets/README.md` -- `modules/ets/src/dream_ets/config.gleam` -- `modules/ets/src/dream_ets/table.gleam` -- `modules/ets/src/dream_ets/operations.gleam` -- `modules/ets/src/dream_ets/encoders.gleam` -- `modules/ets/src/dream_ets/helpers.gleam` -- `modules/ets/src/dream_ets/internal.gleam` -- `modules/ets/src/dream_ets/internal_ffi.erl` -- `modules/ets/test/dream_ets_test.gleam` -- `modules/ets/test/config_test.gleam` -- `modules/ets/test/table_test.gleam` -- `modules/ets/test/operations_test.gleam` -- `modules/ets/test/encoders_test.gleam` -- `modules/ets/test/helpers_test.gleam` - -**No modifications to examples** - Focus solely on creating a high-quality module that exemplifies Dream's standards. 
- -### To-dos - -- [ ] Create modules/ets/ directory with gleam.toml, Makefile, README.md, manifest.toml, src/, and test/ directories -- [ ] Implement core types: Table, TableConfig, TableType, Access, EtsError in src/dream_ets.gleam -- [ ] Implement builder pattern functions: new(), table_type(), access(), read_concurrency(), write_concurrency(), compressed(), create() -- [ ] Implement convenience helpers: encoders/decoders for primitives, json_encoder/decoder, key_string(), counter(), value_json() -- [ ] Implement table operations: set(), get(), delete(), member(), keys(), values(), to_list(), size(), delete_table() -- [ ] Implement counter operations: increment(), increment_by(), decrement() -- [ ] Write builder tests in test/builder_test.gleam (~10 tests) -- [ ] Write table operations tests in test/table_operations_test.gleam (~12 tests) -- [ ] Write counter tests in test/counter_test.gleam (~6 tests) -- [ ] Write type safety tests in test/type_safety_test.gleam (~7 tests) -- [ ] Write error handling tests in test/error_handling_test.gleam (~5 tests) -- [ ] Write concurrency tests in test/concurrency_test.gleam (~3 tests) -- [ ] Rewrite examples/singleton/src/services/rate_limiter_service.gleam to use dream_ets -- [ ] Update examples/singleton/src/services.gleam to use new rate limiter API -- [ ] Update examples/singleton/src/middleware/rate_limit_middleware.gleam -- [ ] Update examples/singleton/gleam.toml to use dream_ets instead of dream_singleton -- [ ] Run and test the rate limiter example end-to-end -- [ ] Delete modules/singleton/ directory -- [ ] Remove dream_singleton from examples/cms/gleam.toml -- [ ] Update MODULAR_ARCHITECTURE.md to reference dream_ets instead of dream_singleton -- [ ] Write comprehensive README.md for modules/ets/ with examples and decision guides -- [ ] Rename examples/singleton/ to examples/rate_limiter/ \ No newline at end of file diff --git a/.cursor/plans/trustbound-feature-ac3d9475.plan.md 
b/.cursor/plans/trustbound-feature-ac3d9475.plan.md deleted file mode 100644 index 0125df9..0000000 --- a/.cursor/plans/trustbound-feature-ac3d9475.plan.md +++ /dev/null @@ -1,271 +0,0 @@ - -# Trustbound to Dream Feature Gap Analysis & Implementation Plan - -## Current State Analysis - -**TrustBound Application** is a full-featured web application with: - -- Multi-tenant SaaS architecture with organization/team management -- AWS Cognito authentication with JWT validation -- HTML templating using Matcha (via Cigogne) -- Static file serving (CSS, JS, images) -- Cookie-based session management -- Role-based authorization (Public, Authenticated, OrganizationOwner, OrganizationMember, Admin) -- Database migrations with PostgreSQL -- External service integrations (Cognito, SES, Stripe) -- Telemetry and structured logging -- HTMX-powered interactive UI - -**Dream Framework** currently provides: - -- Basic routing with path parameters -- Middleware system -- PostgreSQL support via Pog/Squirrel -- JSON validation -- HTTP client with streaming -- Simple response builders - -## Critical Missing Features - -### 1. HTML Templating System (CRITICAL) - -**Gap**: Dream has no HTML templating. TrustBound uses Matcha templates (.matcha files) compiled via Cigogne. - -**Files to reference**: - -- `/Users/dcrockwell/Documents/Code/FileStory/trustbound/client/src/layouts/main_layout.matcha` -- `/Users/dcrockwell/Documents/Code/FileStory/trustbound/client/src/pages/dashboard.matcha` - -**Implementation needed**: - -- Add Cigogne as dependency to dream -- Create `dream/services/templates` module for template rendering -- Support passing data to templates -- Template helper functions for common patterns -- Flash message system (success/error/info) - -### 2. Static File Serving (CRITICAL) - -**Gap**: No static file serving utilities. 
- -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/utilities/http/http_static_file_handler.gleam` - -**Implementation needed**: - -- Add `dream/utilities/http/static.gleam` module -- Support content-type detection by extension -- Cache-Control headers -- 404 handling for missing files -- Security: prevent directory traversal - -### 3. Cookie Management (CRITICAL) - -**Gap**: No cookie utilities. - -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/utilities/http/http_cookie_manager.gleam` - -**Implementation needed**: - -- Add `dream/utilities/http/cookies.gleam` module -- Set cookies with all attributes (HttpOnly, Secure, SameSite, Domain, Max-Age, Path) -- Read cookies from request headers -- Clear cookies -- Support multiple Set-Cookie headers - -### 4. Authentication & Authorization System (CRITICAL) - -**Gap**: No auth system. TrustBound has sophisticated JWT + role-based auth. - -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/utilities/http/http_router.gleam` (lines 60-141) - -**Implementation needed**: - -- Add `dream/utilities/auth` module with: - - JWT validation (using existing Gleam JWT libraries) - - JWKS caching - - Authorization levels/roles - - Extract auth from headers and cookies - - Permission checking -- Integrate with router for route-level auth requirements -- Add auth context to request handling - -### 5. Configuration Management (HIGH PRIORITY) - -**Gap**: No config loading utilities. - -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/config.gleam` - -**Implementation needed**: - -- Add `dream/utilities/config.gleam` module -- Environment variable extraction with defaults -- Required vs optional configs -- Type conversions (string to int, bool) -- Validation (e.g., cookie domain format) - -### 6. 
Enhanced Request/Response Utilities (HIGH PRIORITY) - -**Gaps**: - -- No form data parsing (TrustBound has URL-encoded form parsing) -- No request details extraction pattern (to prevent body re-reading) -- Limited response builders (no HTML, no streaming SSE) - -**References**: - -- `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/types/http_types.gleam` (RequestDetails type) -- `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/utilities/http/http_response_builder.gleam` - -**Implementation needed**: - -- Add `dream/utilities/http/forms.gleam` for form parsing -- Add RequestDetails pattern to extract request once -- Enhance response builders: - - HTML responses with status codes - - SSE streaming responses - - Error response templates (401, 403, 404, 500) - - HTMX-specific headers (HX-Redirect, HX-Retarget, HX-Reswap) - -### 7. Enhanced Router Features (MEDIUM PRIORITY) - -**Gaps**: - -- No built-in way to handle multiple HTTP methods per path -- Path params returned as list, not dict -- No auth requirement per route - -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/src/router.gleam` - -**Implementation needed**: - -- Return path params as `Dict(String, String)` instead of list -- Add `route_multi` function for multiple methods on same path -- Integrate auth requirements into route definitions -- Route groups with shared middleware/auth - -### 8. Migration System (MEDIUM PRIORITY) - -**Gap**: No migration runner. - -**Reference**: `/Users/dcrockwell/Documents/Code/FileStory/trustbound/server/priv/migrations/` - -**Implementation needed**: - -- Add `dream/utilities/database/migrations.gleam` -- Migration file format (timestamp-based naming) -- Up/down migration support -- Migration tracking table -- CLI helper or Makefile targets - -### 9. Logging/Telemetry (OPTIONAL - Can be external) - -**Gap**: No structured logging. TrustBound has extensive telemetry. 
- -**Note**: This could remain application-specific rather than framework feature. - -### 10. Error Page Templates (LOW PRIORITY) - -**Gap**: No default error pages. - -**Implementation needed**: - -- Add default HTML templates for common errors -- Allow customization via templates -- Flash message system for user feedback - -## Dependencies to Add - -```toml -# Add to dream/gleam.toml -cigogne = ">= 5.0.0 and < 6.0.0" # For Matcha templates -simplifile = ">= 2.2.1 and < 3.0.0" # For static files -envoy = ">= 1.0.2 and < 2.0.0" # For environment variables -gleam_crypto = ">= 1.5.0 and < 2.0.0" # For JWT/auth -birl = ">= 1.8.0 and < 2.0.0" # For timestamps -``` - -## Recommended Implementation Priority - -**Phase 1 (Blocking - Cannot port without these)**: - -1. HTML templating integration (Cigogne/Matcha) -2. Static file serving -3. Cookie management -4. Form data parsing - -**Phase 2 (High Value - Needed for auth)**: - -5. JWT validation utilities -6. Authorization system -7. Configuration management -8. Enhanced response builders - -**Phase 3 (Polish - Can work around temporarily)**: - -9. Router enhancements (dict params, route groups) -10. Migration system -11. Default error pages -12. Telemetry helpers - -## Alternative Approaches - -**Option 1**: Keep trustbound-specific code in trustbound - -- Only add truly generic/reusable features to dream -- External services (Cognito, Stripe, SES) stay in trustbound/adapters -- Organization/team logic stays in trustbound -- Dream focuses on HTTP/templating/cookies/auth primitives - -**Option 2**: Full framework approach - -- Move more batteries into dream -- Create dream/services/auth with Cognito adapter -- Create dream/services/email -- Risk: dream becomes opinionated/bloated - -**Recommendation**: Option 1. Keep dream lightweight and composable. Add primitives (cookies, templates, JWT validation) but not integrations (Cognito client, Stripe client). 
- -## Files That Can Stay in Trustbound - -These are application-specific and should NOT move to dream: - -- All `/adapters/` (Cognito, SES, Stripe, OpenAI, BAML) -- All `/orchestrators/` (business logic) -- All `/processors/` (domain logic) -- All `/types/*_types.gleam` except generic HTTP types -- All `/queries/sql/` (database queries) -- Client templates in `/client/src/pages/` (app-specific pages) - -## Success Criteria - -Dream successfully supports trustbound when: - -1. Can serve HTML pages using Matcha templates -2. Can serve static CSS/JS/images -3. Can set/read/clear cookies with all security attributes -4. Can validate JWTs and enforce route-level authorization -5. Can parse form data from POST requests -6. Can load configuration from environment variables -7. Can handle multiple HTTP methods per path elegantly - -## Next Steps - -1. Clarify with user: Should dream be minimal (just add primitives) or more complete (include service adapters)? -2. Confirm priority order above aligns with porting needs -3. Start with Phase 1 implementation -4. Create example showing trustbound-style auth in dream -5. 
Document migration path from trustbound patterns to dream patterns - -### To-dos - -- [ ] Integrate Cigogne/Matcha templating system into dream with render utilities and flash message support -- [ ] Add static file serving module with content-type detection and security -- [ ] Create cookie utilities for setting/reading/clearing with all security attributes -- [ ] Add URL-encoded form data parsing utility -- [ ] Add JWT validation and JWKS caching utilities -- [ ] Create authorization system with role-based access control -- [ ] Add configuration loading from environment with validation -- [ ] Enhance response builders with HTML, SSE streaming, and HTMX headers -- [ ] Update router to use Dict for params, support route groups, and integrate auth requirements -- [ ] Create database migration system with timestamp-based files -- [ ] Add default error page templates (401, 403, 404, 500) \ No newline at end of file diff --git a/.cursor/plans/websocket-implementation-9cff2a91.plan.md b/.cursor/plans/websocket-implementation-9cff2a91.plan.md deleted file mode 100644 index 838500f..0000000 --- a/.cursor/plans/websocket-implementation-9cff2a91.plan.md +++ /dev/null @@ -1,549 +0,0 @@ - -# Complete Websocket Implementation Plan - -## Phase 1: Core Websocket Types - -### 1.1 Create `src/dream/websocket.gleam` - -Create the server-agnostic websocket abstraction module with complete documentation for hex docs. 
- -**Module documentation:** - -- Overview of websocket support in Dream -- When to use websockets vs HTTP streaming vs SSE -- Quick start example -- Explanation of Event messages for pub/sub - -**Types to implement:** - -- `Message(event)` - Incoming messages (Text, Binary, Ping, Pong, Close, Event) -- `Frame` - Outgoing frames (SendText, SendBinary, SendPing, SendPong, SendClose) -- `HandlerResult(state, event)` - Continue or Close with state and frames to send -- `Handler(state, services, event)` - Message handler function type -- `MessageMiddleware(state, services, event)` - Message middleware function type - -**Functions to implement:** - -- `continue(state)` - Continue with state, no messages -- `continue_with_text(state, text)` - Continue and send text -- `continue_with_binary(state, data)` - Continue and send binary -- `continue_with_frames(state, frames)` - Continue and send multiple frames -- `continue_with_selector(state, selector)` - Continue with new selector -- `continue_with_pong(state, data)` - Continue and respond to ping -- `close()` - Close connection with normal closure code -- `close_with_reason(code, reason)` - Close with specific code and reason - -**Constants to define:** - -- Standard websocket close codes (normal_closure, going_away, protocol_error, etc.) - -**Documentation requirements:** - -- Every type has comprehensive doc comments with examples -- Every function has doc comments with parameters, return values, and examples -- Examples show realistic chat room, pub/sub, and game scenarios -- Explain Event messages with concrete pub/sub example - -### 1.2 Update `src/dream/router.gleam` - -Add websocket route support to the router. 
- -**New types:** - -- `WebsocketInit(state, event)` - Initial state and selector for websocket connection - -**Update Route type:** - -- Add `WebsocketRoute` variant with all fields: - - path - - on_init function - - handler function - - on_close function - - middleware (HTTP upgrade middleware) - - message_middleware (per-message middleware) - -**New function:** - -- `websocket_route(router, path, on_init, handler, on_close, middleware, message_middleware)` -- Full documentation explaining: - - HTTP middleware runs once on upgrade (can reject) - - Message middleware runs on each message - - on_init can validate and authenticate - - State is the websocket's context (not Dream context) - -**Update find_route function:** - -- Handle WebsocketRoute variant in pattern matching -- Extract path parameters for websocket routes - -## Phase 2: Mist Adapter Implementation - -### 2.1 Create `src/dream/servers/mist/websocket.gleam` - -Internal adapter module that translates Dream websocket types to Mist's implementation. 
- -**Functions to implement:** - -`translate_incoming_message_from_mist(mist_message) -> dream_message` - -- Convert mist.Text to websocket.Text -- Convert mist.Binary to websocket.Binary -- Convert mist.Ping to websocket.Ping -- Convert mist.Pong to websocket.Pong -- Convert mist.Closed to websocket.Close -- Convert mist.Custom to websocket.Event -- No nested cases - use helper functions for each conversion - -`execute_outgoing_frames(frames, mist_connection) -> Result` - -- Iterate over frames list -- Call appropriate mist send function for each frame -- SendText -> mist.send_text_frame -- SendBinary -> mist.send_binary_frame -- SendPing -> mist.send_ping_frame (if Mist supports) -- SendPong -> mist.send_pong_frame (if Mist supports) -- SendClose -> handled by mist.stop() -- No nested cases - use helper function per frame type - -`build_message_middleware_chain(message_middleware_list, final_handler) -> wrapped_handler` - -- Build middleware chain from list -- Each middleware wraps the next -- Final handler at the end -- Return single wrapped handler function - -`upgrade_to_websocket(mist_request, dream_request, context, services, on_init, handler, on_close, http_middleware, message_middleware) -> mist_response` - -- Run HTTP middleware on dream_request first (can reject upgrade) -- Call on_init with dream_request, context, services -- If on_init returns Error(response), convert response and return -- If on_init returns Ok(WebsocketInit), proceed with upgrade -- Wrap handler with message middleware chain -- Create adapter state that holds user state -- Call mist.websocket with translated functions -- Translate Dream HandlerResult back to Mist Next -- Handle Continue (send frames, update state, set selector) -- Handle Close (send final frames, stop) -- No anonymous functions except where Mist requires them - -**Documentation:** - -- Mark module as internal -- Document translation approach -- Document adapter state pattern - -### 2.2 Update 
`src/dream/servers/mist/handler.gleam` - -Update handler to recognize and process websocket routes. - -**Update `handle_routed_request` function:** - -- Add case for WebsocketRoute -- Extract WebsocketRoute fields -- Build HTTP middleware chain -- Run HTTP middleware (can reject upgrade before websocket starts) -- Call websocket.upgrade_to_websocket with all parameters -- Keep HttpRoute case unchanged -- No nested cases - -## Phase 3: Comprehensive Example Application - -### 3.1 Create `examples/websocket_chat/` - -Structure: - -``` -examples/websocket_chat/ - src/ - main.gleam - router.gleam - services.gleam - controllers/ - websocket_controller.gleam - http_controller.gleam - middleware/ - auth_middleware.gleam - logging_middleware.gleam - rate_limit_middleware.gleam - models/ - chat_room.gleam - views/ - chat_view.gleam - test/ - integration/ - features/ - websocket.feature - step_definitions/ - websocket_steps.exs - test_helper.exs - cucumber_test.exs - gleam.toml - manifest.toml - mix.exs - mix.lock - Makefile - README.md -``` - -### 3.2 Implement Chat Room Application - -**services.gleam:** - -- Services type with database connection and chat_rooms Subject registry -- initialize_services() function -- Chat room registry using process Subjects for pub/sub - -**models/chat_room.gleam:** - -- ChatState type (room_id, user_id, username, room_subject, message_count) -- ChatEvent type (UserJoined, UserLeft, NewMessage, RoomClosed) -- get_or_create_room_subject(registry, room_id) function -- broadcast_to_room(room_subject, event) function - -**controllers/websocket_controller.gleam:** - -`chat_room_init(request, context, services) -> Result(WebsocketInit, Response)` - -- Extract room_id from path parameters -- Validate room_id -- Get authenticated user from context -- If not authenticated, return 401 Response -- Get or create room Subject from registry -- Create selector for room Subject -- Broadcast UserJoined event -- Create ChatState with room_id, user_id, 
room_subject, message_count -- Return Ok(WebsocketInit(state, selector)) -- No nested cases - use helper functions - -`chat_room_handler(state, message, services) -> HandlerResult` - -- Handle websocket.Text: save message, broadcast NewMessage event -- Handle websocket.Event(NewMessage): format and send to client -- Handle websocket.Event(UserJoined): send join notification -- Handle websocket.Event(UserLeft): send leave notification -- Handle websocket.Ping: respond with Pong -- Handle websocket.Close: return close -- Handle websocket.Binary: ignore or log -- Handle websocket.Pong: ignore -- No nested cases - each message type has its own helper function - -`chat_room_close(state, services) -> Nil` - -- Broadcast UserLeft event to room -- Log disconnect with message_count -- Clean up any resources - -**Helper functions (no nested cases):** - -- `handle_client_text_message(state, text, services) -> HandlerResult` -- `handle_new_message_event(state, user, text) -> HandlerResult` -- `handle_user_joined_event(state, username) -> HandlerResult` -- `handle_user_left_event(state, username) -> HandlerResult` -- `format_chat_message_json(user, text) -> String` -- `format_user_event_json(event_type, username) -> String` - -**controllers/http_controller.gleam:** - -- index_controller: serve HTML page with websocket client -- rooms_list_controller: list active chat rooms - -**middleware/logging_middleware.gleam:** - -`log_websocket_message_middleware(state, message, services, next) -> HandlerResult` - -- Log incoming message type and state -- Call next(state, message, services) -- Log outgoing frames from result -- Return result unchanged -- No nested cases - -**middleware/rate_limit_middleware.gleam:** - -`rate_limit_middleware(state, message, services, next) -> HandlerResult` - -- Check if message is Text or Binary (client messages) -- If not, call next immediately -- If yes, check state.message_count -- If over limit, return websocket.close_with_reason(policy_violation, 
"Rate limit exceeded") -- If under limit, call next -- No nested cases - use helper to check rate limit - -**views/chat_view.gleam:** - -- HTML page with websocket client JavaScript -- Connect to websocket -- Send messages -- Display incoming messages -- Show user join/leave events -- Handle ping/pong for keepalive -- Show connection status - -**main.gleam:** - -- Initialize services with chat room registry -- Create router with HTTP and websocket routes -- Start server on port 3000 - -**router.gleam:** - -- GET "/" -> serve chat HTML page -- GET "/rooms" -> list active rooms -- websocket_route "/chat/:room_id" with: - - on_init: chat_room_init - - handler: chat_room_handler - - on_close: chat_room_close - - middleware: [auth_middleware, logging_middleware] - - message_middleware: [rate_limit_middleware, logging_middleware] - -**README.md:** - -- Overview of chat application -- Features demonstrated (pub/sub, middleware, ping/pong, rate limiting) -- How to run -- How to test with multiple clients -- Code walkthrough - -### 3.3 Example Makefile - -```makefile -.PHONY: run test test-integration clean - -run: - @gleam run -m main - -test: - @gleam test - -test-integration: - # Start server, run cucumber tests, stop server - # Similar to streaming example Makefile -``` - -## Phase 4: Unit Tests - -### 4.1 Create `test/dream/websocket_test.gleam` - -Test all helper functions and types: - -- `continue_with_text_creates_correct_result_test()` -- `continue_with_binary_creates_correct_result_test()` -- `continue_with_frames_creates_correct_result_test()` -- `continue_with_pong_responds_to_ping_test()` -- `close_uses_normal_closure_code_test()` -- `close_with_reason_uses_custom_code_test()` -- All close code constants are correct values - -### 4.2 Create `test/dream/router/websocket_test.gleam` - -Test websocket routing: - -- `websocket_route_adds_route_to_router_test()` -- `websocket_route_matches_correct_path_test()` -- `websocket_route_extracts_path_parameters_test()` -- 
`websocket_route_with_middleware_runs_middleware_test()` -- `websocket_route_with_message_middleware_stores_middleware_test()` -- `find_route_matches_websocket_route_test()` -- `find_route_returns_websocket_route_with_params_test()` - -### 4.3 Create `test/dream/servers/mist/websocket_test.gleam` - -Test Mist adapter: - -- `translate_incoming_message_text_from_mist_test()` -- `translate_incoming_message_binary_from_mist_test()` -- `translate_incoming_message_ping_from_mist_test()` -- `translate_incoming_message_pong_from_mist_test()` -- `translate_incoming_message_close_from_mist_test()` -- `translate_incoming_message_event_from_mist_test()` -- `execute_outgoing_frames_sends_text_test()` -- `execute_outgoing_frames_sends_binary_test()` -- `execute_outgoing_frames_sends_multiple_frames_test()` -- `build_message_middleware_chain_wraps_handler_test()` -- `build_message_middleware_chain_executes_in_order_test()` - -## Phase 5: Integration Tests - -### 5.1 Create Cucumber Feature File - -`examples/websocket_chat/test/integration/features/websocket.feature` - -**Scenarios to test:** - -Connection and Basic Protocol: - -- Connect to websocket endpoint successfully -- Send text message and receive echo -- Send binary message and receive echo -- Send ping and receive pong -- Close connection gracefully -- Reject connection with invalid room ID -- Reject connection without authentication - -Chat Room Features: - -- Join chat room and receive welcome message -- Send message and receive it back -- Multiple clients in same room receive messages -- User join event sent to all clients in room -- User leave event sent when client disconnects -- Switch rooms and receive messages from new room - -Middleware: - -- HTTP middleware rejects unauthenticated websocket upgrade -- Message middleware logs all messages -- Rate limit middleware blocks after 100 messages -- Rate limit middleware allows messages under limit - -Error Cases: - -- Invalid message format handled gracefully -- Server 
shutdown closes connections cleanly -- Network disconnect handled correctly - -### 5.2 Create Step Definitions - -`examples/websocket_chat/test/integration/step_definitions/websocket_steps.exs` - -Implement step definitions for: - -- Starting websocket connections -- Sending websocket messages -- Receiving websocket messages -- Asserting on message content -- Managing multiple clients -- Checking connection state - -Use Elixir websocket client library for testing. - -### 5.3 Create Test Helper - -`examples/websocket_chat/test/integration/test_helper.exs` - -Helper functions: - -- Start test server on port 3000 -- Create websocket client -- Connect to websocket -- Send messages -- Receive messages with timeout -- Close connection -- Clean up after tests - -## Phase 6: Documentation - -### 6.1 Module Documentation - -Each module needs: - -- Overview paragraph (what it does) -- When to use it section -- Quick examples section -- Detailed usage section (for complex modules) - -Modules to document: - -- `dream/websocket.gleam` (core types and functions) -- `dream/router.gleam` (websocket_route function) -- All example application modules - -### 6.2 Type Documentation - -Every type needs: - -- Purpose description -- Field explanations -- Usage examples -- Related types - -Types to document: - -- Message(event) -- Frame -- HandlerResult(state, event) -- Handler(state, services, event) -- MessageMiddleware(state, services, event) -- WebsocketInit(state, event) - -### 6.3 Function Documentation - -Every function needs: - -- Purpose description -- Parameter descriptions -- Return value description -- At least one example -- Related functions - -All public functions in websocket.gleam and router.gleam. 
- -### 6.4 Example Documentation - -Examples need: - -- README.md with overview -- Inline code comments explaining key concepts -- Architecture documentation (how components interact) -- How to run and test - -### 6.5 Guide Documentation - -Create `docs/guides/websockets.md`: - -- Introduction to websockets in Dream -- When to use websockets vs streaming vs SSE -- Basic websocket route setup -- Handling different message types -- Using Event messages for pub/sub -- Message middleware patterns -- Testing websockets -- Production considerations -- Common patterns and recipes - -## Implementation Notes - -**Code Style Requirements:** - -- NO ABBREVIATIONS in variable names, function names, or types -- NO NESTED CASES - extract to helper functions -- NO ANONYMOUS FUNCTIONS except where Mist API requires them -- Use descriptive names (chat_room_handler not chat_handler) -- Use helper functions for each case branch -- Clear separation of concerns - -**Testing Strategy:** - -- Unit tests for pure functions -- Integration tests for full scenarios -- Test error cases -- Test middleware execution order -- Test multiple concurrent clients -- Test all protocol features (ping/pong, close codes) - -**Documentation Standards:** - -- Complete hex docs on all public APIs -- Examples in every function doc -- Real-world scenarios in module docs -- Architecture explanations in guides -- No assumed knowledge - explain everything - -## File Creation Order - -1. Core types: `src/dream/websocket.gleam` -2. Router updates: `src/dream/router.gleam` -3. Mist adapter: `src/dream/servers/mist/websocket.gleam` -4. Handler updates: `src/dream/servers/mist/handler.gleam` -5. Example services: `examples/websocket_chat/src/services.gleam` -6. Example models: `examples/websocket_chat/src/models/chat_room.gleam` -7. Example middleware: `examples/websocket_chat/src/middleware/*.gleam` -8. Example controller: `examples/websocket_chat/src/controllers/websocket_controller.gleam` -9. 
Example views: `examples/websocket_chat/src/views/chat_view.gleam` -10. Example router: `examples/websocket_chat/src/router.gleam` -11. Example main: `examples/websocket_chat/src/main.gleam` -12. Unit tests: `test/dream/websocket_test.gleam` -13. Router tests: `test/dream/router/websocket_test.gleam` -14. Adapter tests: `test/dream/servers/mist/websocket_test.gleam` -15. Integration tests: `examples/websocket_chat/test/integration/*` -16. Documentation: `docs/guides/websockets.md` -17. Example README: `examples/websocket_chat/README.md` \ No newline at end of file diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 606556a..d81281b 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -8,3 +8,5 @@ custom: + + diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index a0241b0..23401f3 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -123,3 +123,5 @@ body: + + diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index cbc58c8..364a3a6 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -15,3 +15,5 @@ contact_links: + + diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index d7c9060..7c47fbb 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -94,3 +94,5 @@ body: + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 651e23a..3267c3a 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -77,3 +77,5 @@ N/A + + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index eb0a1dd..2b65ae7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -48,3 +48,5 @@ jobs: + + diff --git a/.gitignore b/.gitignore index 053ea0c..a15d41e 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,13 @@ erl_crash.dump 
!/test/fixtures/file/temp/.gitkeep /test/fixtures/snapshots/temp/* !/test/fixtures/snapshots/temp/.gitkeep +/test/fixtures/snapshots/clearable_tmp_* + +# Local/editor scratch & generated temp output +.cursor/ +/test/tmp/ +/examples/**/test/tmp/ +PLAN.md # Analysis reports (generated files) COMPATIBILITY_REPORT.md diff --git a/AGENTS.md b/AGENTS.md index 6bafa4e..e3b03b5 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -26,13 +26,13 @@ It is **not** user-facing documentation. It exists to reduce repeated mistakes. - In Gleam, importing specific names from a module using `{...}` **also** brings the module name into scope. - `import dream_test/unit.{describe, it}` imports `unit` and the unqualified values `describe` and `it`. - - `import dream_test/assertions/should.{or_fail_with}` imports `should` and the unqualified value `or_fail_with`, so `should.equal` and `or_fail_with(...)` are both valid. + - `import dream_test/matchers.{or_fail_with}` imports `matchers` and the unqualified value `or_fail_with`, so `matchers.be_equal` and `or_fail_with(...)` are both valid. - Only use `as` when you genuinely need to distinguish between two different modules with the **same** final name. - - Do **not** write `import dream_test/assertions/should as should`; it adds no information and is banned in this project. + - Do **not** write `import dream_test/matchers as should`; it adds no information and is banned in this project. - To avoid duplicate imports and confusion, follow the project standards: - Use unqualified imports for: - `describe`, `it` from `dream_test/unit`. - - Piped helpers like `or_fail_with` from `dream_test/assertions/should`. + - Piped helpers like `or_fail_with` from `dream_test/matchers`. - Core types via `{type ...}` imports when the module name adds no clarity. ## 3. Type Imports @@ -47,7 +47,7 @@ It is **not** user-facing documentation. It exists to reduce repeated mistakes. - `assert` is a **reserved word** in Gleam. 
- Do **not** use it as a directory or module name (e.g. `dream_test/assert`). - - Use `assertions` instead (e.g. `dream_test/assertions/context` or `dream_test/bootstrap/assertions`). + - Use `matchers` instead (e.g. `dream_test/matchers`). ## 5. Pattern Matching & Blocks @@ -77,8 +77,8 @@ It is **not** user-facing documentation. It exists to reduce repeated mistakes. - Do **not** return functions from assertion helpers for general chaining. - This project prefers simple, multi-argument functions that are pipe-friendly. - Correct pattern for assertions: - - `value |> should() |> equal(expected) |> or_fail_with("message")` - - Not: `value |> should.equal(expected)` (old pattern without chaining). + - `value |> should |> be_equal(expected) |> or_fail_with("message")` + - Not: `value |> should.be_equal(expected)` (old pattern without chaining). ## 8. No Closures, No Anonymous Functions diff --git a/CHANGELOG.md b/CHANGELOG.md index 28017e9..bf80167 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,61 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [2.0.0] - 2025-12-27 + +### Added + +- **Suite-first runner builder** (`dream_test/runner`) + + - New `runner.new([suite]) |> ... |> runner.run()` pipeline for configuring and running suites + - Configuration is applied via builder functions (`max_concurrency`, `default_timeout_ms`, `progress_reporter`, `results_reporters`, `output`, `silent`, `exit_on_failure`, `filter_tests`) + - Suite list can be built incrementally with `add_suites(...)`, and specific suites can run with an execution config override via `add_suites_with_config(...)` + +- **Runtime test discovery** (`dream_test/discover`) + + - Builder for discovering compiled test modules under `./test/` via module path globs (e.g. 
`"unit/**_test.gleam"`) + - Loads modules that export `tests/0` and calls them to obtain `TestSuite(Nil)` values + +- **Reporting split** (`dream_test/reporters/types`, `dream_test/reporters/bdd`, `dream_test/reporters/json`, `dream_test/reporters/progress`) + + - Runner emits structured `ReporterEvent`s (`RunStarted`, `TestFinished`, `RunFinished`, plus hook events) + - Live progress via `runner.progress_reporter(progress.new())` + - End-of-run reporting via `runner.results_reporters([bdd.new(), json.new(), ...])` + +- **Live progress bar reporter** (`dream_test/reporters/progress`) + + - In-place single-line progress bar that adapts to terminal width + +- **Selective sandbox crash reports** (`dream_test/sandbox`) + + - New `SandboxConfig.show_crash_reports` flag (default `False`) to suppress noisy BEAM crash reports while still reporting failures + - Convenience helper `sandbox.with_crash_reports` for local debugging + +### Changed + +- **Unit DSL is suite-first** (`dream_test/unit`, `dream_test/types`) + + - Suite items are now typed (`SuiteItem(ctx)`) and suites carry context (`TestSuite(ctx)`) + - Test bodies now return `Result(AssertionResult, String)` for explicit failure reporting + - Lifecycle hooks now return `Result(ctx, String)` for explicit failure reporting + +- **Parallel runner API** (`dream_test/parallel`) + + - Added event-driven entrypoints for driving reporters during parallel execution + +### Documentation + +- Updated docs to the new v2 suite-first pipeline and event-driven reporter model. + +### Breaking Changes + +- `dream_test/runner`: replaced `run_all*` / `run_suite*` free functions with the `RunBuilder` pipeline (`runner.new(...) |> ... |> runner.run()`). +- `dream_test/unit`: test bodies now return `Result(AssertionResult, String)` instead of `AssertionResult`. Hooks now return `Result(ctx, String)` instead of `ctx`. 
+- `dream_test/unit`: replaced the old `UnitTest` tree + `to_test_suite` conversion with typed suite builders (`describe`, `group`, `describe_with_hooks`, `SuiteHooks`). +- `dream_test/types`: suites and test cases are now context-typed (`TestSuite(ctx)`, `SuiteTestCase(ctx)`), so user code matching these types must be updated. +- **Reporters refactored and split**: live output via `runner.progress_reporter(progress.new())`, end-of-run output via `runner.results_reporters([bdd.new(), json.new(), ...])`. The old `dream_test/reporter` module is replaced by `dream_test/reporters/*`. +- `dream_test/gherkin/world.get`: error type changed from `Result(a, Nil)` to `Result(a, String)` for more informative failures. + ## [1.2.0] - 2025-12-04 ### Added @@ -89,7 +144,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Per-test and total duration in BDD and Gherkin reporters - Monotonic time measurement for accurate elapsed time -- **JSON Reporter** (`dream_test/reporter/json`) +- **JSON Reporter** (`dream_test/reporters/json`) - Machine-readable JSON output for CI/CD integration - `format` and `format_pretty` for string output @@ -178,7 +233,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **Assertions Module** (`dream_test/assertions/should`) - - Fluent assertion API with `should()` builder + - Fluent assertion API with `should` builder - `or_fail_with` for custom failure messages - Chainable assertion pattern @@ -199,7 +254,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Test timeout handling - Comprehensive test result reporting -- **BDD Reporter** (`dream_test/reporter/bdd`) +- **BDD Reporter** (`dream_test/reporters/bdd`) - Colorized terminal output - Hierarchical test result display @@ -228,7 +283,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - STANDARDS document for code conventions - API documentation for all public 
modules -[Unreleased]: https://github.com/TrustBound/dream_test/compare/1.2.0...HEAD +[Unreleased]: https://github.com/TrustBound/dream_test/compare/2.0.0...HEAD +[2.0.0]: https://github.com/TrustBound/dream_test/compare/1.2.0...2.0.0 [1.2.0]: https://github.com/TrustBound/dream_test/compare/1.1.0...1.2.0 [1.1.0]: https://github.com/TrustBound/dream_test/compare/1.0.3...1.1.0 [1.0.3]: https://github.com/TrustBound/dream_test/compare/1.0.2...1.0.3 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index b615910..d1a6759 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -139,3 +139,5 @@ For answers to common questions about this code of conduct, see the FAQ at + + diff --git a/COMPATIBILITY.md b/COMPATIBILITY.md index 771ed8f..e550274 100644 --- a/COMPATIBILITY.md +++ b/COMPATIBILITY.md @@ -10,4 +10,4 @@ All versions tested with: `gleam test` -Last tested: 2025-12-04 \ No newline at end of file +Last tested: 2025-12-27 \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 64d5255..da720b0 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -182,7 +182,7 @@ Add tests in the appropriate `test/dream_test/` subdirectory matching the source ```gleam import dream_test/unit.{describe, it} -import dream_test/assertions/should.{should, equal, fail_with, or_fail_with} +import dream_test/matchers.{should, be_equal, fail_with, or_fail_with} import dream_test/types.{AssertionOk, MatchFailed, MatchOk} pub fn tests() { @@ -263,11 +263,11 @@ Every public function needs: /// /// ```gleam /// 42 -/// |> should() -/// |> equal(42) +/// |> should +/// |> be_equal(42) /// |> or_fail_with("Should be 42") /// ``` -pub fn equal(result: MatchResult(a), expected: a) -> MatchResult(a) { +pub fn be_equal(result: MatchResult(a), expected: a) -> MatchResult(a) { // ...
} ```` @@ -276,7 +276,7 @@ pub fn equal(result: MatchResult(a), expected: a) -> MatchResult(a) { See these files for documentation quality standards: -- `src/dream_test/assertions/should.gleam` — module docs with tables, chaining examples +- `src/dream_test/matchers.gleam` — module docs with tables, chaining examples - `src/dream_test/unit.gleam` — DSL usage patterns - `src/dream_test/runner.gleam` — configuration examples diff --git a/Makefile b/Makefile index f147129..60c4060 100644 --- a/Makefile +++ b/Makefile @@ -9,8 +9,11 @@ test: # Run example project tests examples: cd examples/snippets && gleam test + @echo "" + @echo "NOTE: failure_showcase is expected to fail (for reporter demo)." + cd examples/failure_showcase && gleam test || true cd examples/shopping_cart && gleam test cd examples/cache_app && gleam test -# Run everything: dream_test tests + examples -all: test examples +# Run everything: examples first, then dream_test tests last +all: examples test diff --git a/README.md b/README.md index e0f41b5..bf17c70 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@
- Dream Logo + Dream Test logo

Dream Test

-

A testing framework for Gleam that gets out of your way.

+

Feature-rich unit and integration testing for Gleam.

Hex.pm @@ -9,111 +9,95 @@ Documentation - + License

-```gleam -import dream_test/unit.{describe, it} -import dream_test/assertions/should.{be_error, be_ok, equal, or_fail_with, should} +## Install Dream Test -pub fn tests() { - describe("Calculator", [ - it("adds two numbers", fn() { - add(2, 3) - |> should() - |> equal(5) - |> or_fail_with("2 + 3 should equal 5") - }), - it("handles division", fn() { - divide(10, 2) - |> should() - |> be_ok() - |> equal(5) - |> or_fail_with("10 / 2 should equal 5") - }), - it("returns error for division by zero", fn() { - divide(1, 0) - |> should() - |> be_error() - |> or_fail_with("Division by zero should error") - }), - ]) -} +```sh +gleam add --dev dream_test ``` -``` -Calculator - ✓ adds two numbers - ✓ handles division - ✓ returns error for division by zero +## Why Dream Test? -Summary: 3 run, 0 failed, 3 passed in 2ms -``` +Rapid application development needs testing tools that scale and support the growing needs of the application without slowing down progress. Dream Test was designed to help engineers write expressive unit and integration tests for their applications using the tools and techniques they know from other ecosystems, adapted properly to Gleam and the BEAM.
-🧪 [Tested source](examples/snippets/test/hero.gleam) +### Test Styles ---- +| Feature | What you get | +| ----------------------- | ------------------------------------------------ | +| 🧪 **Unit tests** | `describe`/`group`/`it` for organizing tests | +| 🥒 **Gherkin specs** | `.feature` files or inline Gleam DSL | +| 📸 **Snapshot testing** | Lock in complex output without manual assertions | -## Contents +### Assertions -- [Installation](#installation) -- [Why Dream Test?](#why-dream-test) -- [Quick Start](#quick-start) -- [The Assertion Pattern](#the-assertion-pattern) -- [Lifecycle Hooks](#lifecycle-hooks) -- [Snapshot Testing](#snapshot-testing) -- [Gherkin / BDD Testing](#gherkin--bdd-testing) -- [BEAM-Powered Test Isolation](#beam-powered-test-isolation) -- [Tagging, CI & Reporters](#tagging-ci--reporters) -- [How It Works](#how-it-works) +| Feature | What you get | +| ---------------------------- | ----------------------------------------------------------------- | +| ⛓️ **Pipe-first assertions** | Matchers that chain and compose | +| 📦 **Built-in matchers** | Equality, booleans, options, results, lists, strings, comparisons | +| 🎁 **Unwrapping matchers** | Option/Result matchers that unwrap for continued assertion | +| 🛠️ **Custom matchers** | Write your own for your domain | ---- +### Test Organization -## Installation +| Feature | What you get | +| -------------------------- | ------------------------------------------------------ | +| 🔄 **Lifecycle hooks** | `before_all`, `after_all`, `before_each`, `after_each` | +| 🔗 **Context-aware tests** | Shared setup across tests with `unit_context` | +| 🏷️ **Tags** | Filter and organize test runs | -```toml -# gleam.toml -[dev-dependencies] -dream_test = "~> 1.2" -``` +### Execution ---- +| Feature | What you get | +| -------------------------------------- | --------------------------------------------------------------------------------------------------------- | +| ⚡ **Parallel execution** | Configurable 
concurrency for fast runs | +| 🛡️ **Isolation** | Crashes and timeouts don't break the run | +| ⏱️ **Timeouts** | Per-test timeout control | +| 🔍 **Test discovery** | Find tests from file paths | +| 🚨 **Exit-on-failure** | Fail fast for CI | +| 🧩 **Suite-specific execution config** | Run some suites sequential/with custom timeouts in the same runner (`runner.add_suites_with_config(...)`) | -## Why Dream Test? +### Reporting -| Feature | What you get | -| ----------------------- | ---------------------------------------------------------------------------- | -| **Blazing fast** | Parallel execution + BEAM lightweight processes = 214 tests in 300ms | -| **Parallel by default** | Tests run concurrently across all cores—configurable concurrency | -| **Crash-proof** | Each test runs in an isolated BEAM process; one crash doesn't kill the suite | -| **Timeout-protected** | Hanging tests get killed automatically; no more stuck CI pipelines | -| **Lifecycle hooks** | `before_all`, `before_each`, `after_each`, `after_all` for setup/teardown | -| **Snapshot testing** | Compare output against golden files; auto-create on first run | -| **Tagging & filtering** | Tag tests and run subsets with custom filter predicates | -| **Gleam-native** | Pipe-first assertions that feel natural; no macros, no reflection, no magic | -| **Multiple reporters** | BDD-style human output or JSON for CI/tooling integration | -| **Familiar syntax** | If you've used Jest, RSpec, or Mocha, you already know the basics | -| **Type-safe** | Your tests are just Gleam code; the compiler catches mistakes early | -| **Gherkin/BDD** | Write specs in plain English with Cucumber-style Given/When/Then | -| **Self-hosting** | Dream Test tests itself; we eat our own cooking | +| Feature | What you get | +| ---------------------------- | ---------------------------------------------------- | +| 📝 **BDD results reporter** | Human-readable, hierarchical output (printed at end) | +| 📊 **Progress reporter** | Live 
single-line progress bar during the run | +| 📋 **JSON results reporter** | Machine-readable JSON (printed at end) | +| 🌿 **Gherkin formatting** | Dedicated output for feature tests | ---- +Dream Test splits reporting into: + +- **Progress** (during the run): `runner.progress_reporter(progress.new())` +- **Results** (after the run): `runner.results_reporters([bdd.new(), json.new(), ...])` + +## Full Usage Guide -## Quick Start +1. [Installation](documentation/01-installation.md) +2. [Quick Start](documentation/02-quick-start.md) +3. [Writing Tests](documentation/03-writing-tests.md) +4. [Context-Aware Tests](documentation/04-context-aware-tests.md) +5. [Assertions & Matchers](documentation/05-assertions-and-matchers.md) +6. [Lifecycle Hooks](documentation/06-lifecycle-hooks.md) +7. [Runner & Execution](documentation/07-runner-and-execution.md) +8. [Reporters](documentation/08-reporters.md) +9. [Snapshot Testing](documentation/09-snapshot-testing.md) +10. [Gherkin BDD](documentation/10-gherkin-bdd.md) +11. [Utilities](documentation/11-utilities.md) -### 1. 
Write tests with `describe` and `it` +## Unit Tests ```gleam -// test/my_app_test.gleam -import dream_test/unit.{describe, it, to_test_cases} -import dream_test/runner.{exit_on_failure, run_all} -import dream_test/reporter/bdd.{report} -import dream_test/assertions/should.{should, equal, or_fail_with} -import gleam/io +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} import gleam/string pub fn tests() { @@ -121,847 +105,115 @@ pub fn tests() { it("trims whitespace", fn() { " hello " |> string.trim() - |> should() - |> equal("hello") + |> should + |> be_equal("hello") |> or_fail_with("Should remove surrounding whitespace") }), - it("finds substrings", fn() { - "hello world" - |> string.contains("world") - |> should() - |> equal(True) - |> or_fail_with("Should find 'world' in string") - }), ]) } pub fn main() { - to_test_cases("my_app_test", tests()) - |> run_all() - |> report(io.print) - |> exit_on_failure() -} -``` - -🧪 [Tested source](examples/snippets/test/quick_start.gleam) - -### 2. Run with gleam test - -```sh -gleam test -``` - -### 3. See readable output - -``` -String utilities - ✓ trims whitespace - ✓ finds substrings - -Summary: 2 run, 0 failed, 2 passed in 1ms -``` - ---- - -## The Assertion Pattern - -Every assertion follows the same pattern: - -```gleam -value |> should() |> matcher() |> or_fail_with("message") -``` - -### Chaining matchers - -Matchers can be chained. 
Each one passes its unwrapped value to the next: - -```gleam -// Unwrap Some, then check the value -Some(42) -|> should() -|> be_some() -|> equal(42) -|> or_fail_with("Should contain 42") - -// Unwrap Ok, then check the value -Ok("success") -|> should() -|> be_ok() -|> equal("success") -|> or_fail_with("Should be Ok with 'success'") -``` - -🧪 [Tested source](examples/snippets/test/chaining.gleam) - -### Available matchers - -| Category | Matchers | -| --------------- | ------------------------------------------------------------------------------------------- | -| **Equality** | `equal`, `not_equal` | -| **Boolean** | `be_true`, `be_false` | -| **Option** | `be_some`, `be_none` | -| **Result** | `be_ok`, `be_error` | -| **Collections** | `contain`, `not_contain`, `have_length`, `be_empty` | -| **Comparison** | `be_greater_than`, `be_less_than`, `be_at_least`, `be_at_most`, `be_between`, `be_in_range` | -| **String** | `start_with`, `end_with`, `contain_string` | -| **Snapshot** | `match_snapshot`, `match_snapshot_inspect` | - -### Custom matchers - -Create your own matchers by working with `MatchResult(a)`. 
A matcher receives a result, checks if it already failed (propagate), or validates the value: - -```gleam -import dream_test/types.{ - type MatchResult, AssertionFailure, CustomMatcherFailure, MatchFailed, MatchOk, -} -import gleam/option.{Some} - -pub fn be_even(result: MatchResult(Int)) -> MatchResult(Int) { - case result { - // Propagate existing failures - MatchFailed(failure) -> MatchFailed(failure) - // Check our condition - MatchOk(value) -> case value % 2 == 0 { - True -> MatchOk(value) - False -> MatchFailed(AssertionFailure( - operator: "be_even", - message: "", - payload: Some(CustomMatcherFailure( - actual: int.to_string(value), - description: "expected an even number", - )), - )) - } - } + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } ``` -Use it like any built-in matcher: - -```gleam -4 -|> should() -|> be_even() -|> or_fail_with("Should be even") -``` - -🧪 [Tested source](examples/snippets/test/custom_matchers.gleam) - -### Explicit success and failure - -When you need to explicitly succeed or fail in conditional branches: - -```gleam -import dream_test/assertions/should.{fail_with, succeed} - -case result { - Ok(_) -> succeed() - Error(_) -> fail_with("Should have succeeded") -} -``` - -🧪 [Tested source](examples/snippets/test/explicit_failures.gleam) - -### Skipping tests - -Use `skip` instead of `it` to temporarily disable a test: - -```gleam -import dream_test/unit.{describe, it, skip} - -describe("Feature", [ - it("works correctly", fn() { ... }), - skip("not implemented yet", fn() { ... }), // Skipped - it("handles edge cases", fn() { ... }), -]) -``` - -``` -Feature - ✓ works correctly - - not implemented yet - ✓ handles edge cases - -Summary: 3 run, 0 failed, 2 passed, 1 skipped -``` - -The test body is preserved but not executed—just change `skip` back to `it` when ready. 
- -🧪 [Tested source](examples/snippets/test/skipping_tests.gleam) - ---- - -## Lifecycle Hooks - -Setup and teardown logic for your tests. Dream_test supports four lifecycle hooks -that let you run code before and after tests. - -```gleam -import dream_test/unit.{describe, it, before_each, after_each, before_all, after_all} -import dream_test/assertions/should.{succeed} - -describe("Database tests", [ - before_all(fn() { - start_database() - succeed() - }), - - before_each(fn() { - begin_transaction() - succeed() - }), - - it("creates a user", fn() { ... }), - it("deletes a user", fn() { ... }), - - after_each(fn() { - rollback_transaction() - succeed() - }), - - after_all(fn() { - stop_database() - succeed() - }), -]) -``` - -🧪 [Tested source](examples/snippets/test/lifecycle_hooks.gleam) - -### Hook Types - -| Hook | Runs | Use case | -| ------------- | --------------------------------- | --------------------------------- | -| `before_all` | Once before all tests in group | Start services, create temp files | -| `before_each` | Before each test | Reset state, begin transaction | -| `after_each` | After each test (even on failure) | Rollback, cleanup temp data | -| `after_all` | Once after all tests in group | Stop services, remove temp files | - -### Two Execution Modes - -Choose the mode based on which hooks you need: - -| Mode | Function | Hooks supported | -| ----- | ----------------------------- | --------------------------- | -| Flat | `to_test_cases` → `run_all` | `before_each`, `after_each` | -| Suite | `to_test_suite` → `run_suite` | All four hooks | - -**Flat mode** — simpler, faster; use when you only need per-test setup: - -```gleam -import dream_test/unit.{describe, it, before_each, to_test_cases} -import dream_test/runner.{run_all} - -to_test_cases("my_test", tests()) -|> run_all() -|> report(io.print) -``` - -**Suite mode** — preserves group structure; use when you need once-per-group setup: - -```gleam -import dream_test/unit.{describe, it, 
before_all, after_all, to_test_suite} -import dream_test/runner.{run_suite} - -to_test_suite("my_test", tests()) -|> run_suite() -|> report(io.print) -``` - -🧪 [Tested source](examples/snippets/test/execution_modes.gleam) - -### Hook Inheritance - -Nested `describe` blocks inherit parent hooks. Hooks run outer-to-inner for -setup, inner-to-outer for teardown: - -```gleam -describe("Outer", [ - before_each(fn() { - io.println("1. outer setup") - succeed() - }), - after_each(fn() { - io.println("4. outer teardown") - succeed() - }), - describe("Inner", [ - before_each(fn() { - io.println("2. inner setup") - succeed() - }), - after_each(fn() { - io.println("3. inner teardown") - succeed() - }), - it("test", fn() { - io.println("(test)") - succeed() - }), - ]), -]) -// Output: 1. outer setup → 2. inner setup → (test) → 3. inner teardown → 4. outer teardown -``` - -🧪 [Tested source](examples/snippets/test/hook_inheritance.gleam) - -### Hook Failure Behavior - -If a hook fails, Dream Test handles it gracefully: - -| Failure in | Result | -| ------------- | ------------------------------------------------- | -| `before_all` | All tests in group marked `SetupFailed`, skipped | -| `before_each` | That test marked `SetupFailed`, skipped | -| `after_each` | Test result preserved; hook failure recorded | -| `after_all` | Hook failure recorded; all test results preserved | - -```gleam -describe("Handles failures", [ - before_all(fn() { - case connect_to_database() { - Ok(_) -> succeed() - Error(e) -> fail_with("Database connection failed: " <> e) - } - }), - // If before_all fails, these tests are marked SetupFailed (not run) - it("test1", fn() { succeed() }), - it("test2", fn() { succeed() }), -]) -``` - -🧪 [Tested source](examples/snippets/test/hook_failure.gleam) - ---- - -## Snapshot Testing - -Snapshot tests compare output against stored "golden" files. On first run, the snapshot is created automatically. On subsequent runs, any difference is a failure. 
- -```gleam -import dream_test/assertions/should.{should, match_snapshot, or_fail_with} - -it("renders user profile", fn() { - render_profile(user) - |> should() - |> match_snapshot("./test/snapshots/user_profile.snap") - |> or_fail_with("Profile should match snapshot") -}) -``` - -| Scenario | Behavior | -| ---------------- | --------------------------- | -| Snapshot missing | Creates it, test **passes** | -| Snapshot matches | Test **passes** | -| Snapshot differs | Test **fails** with diff | - -**Updating snapshots** — delete the file and re-run the test: - -```sh -rm ./test/snapshots/user_profile.snap -gleam test -``` - -**Testing non-strings** — use `match_snapshot_inspect` for complex data: - -```gleam -build_config() -|> should() -|> match_snapshot_inspect("./test/snapshots/config.snap") -|> or_fail_with("Config should match snapshot") -``` - -This serializes values using `string.inspect`, so you can snapshot records, lists, tuples, etc. +🧪 [Tested source](examples/snippets/test/snippets/unit/quick_start.gleam) · 📖 [Guide](documentation/02-quick-start.md) -**Clearing snapshots programmatically:** - -```gleam -import dream_test/matchers/snapshot +## Gherkin Integration Tests -// Clear one snapshot -let _ = snapshot.clear_snapshot("./test/snapshots/old.snap") - -// Clear all .snap files in a directory -let _ = snapshot.clear_snapshots_in_directory("./test/snapshots") +```gherkin +Feature: Shopping Cart + Scenario: Adding items + Given I have 3 items in my cart + When I add 2 more items + Then I should have 5 items total ``` -🧪 [Tested source](examples/snippets/test/snapshot_testing.gleam) - ---- - -## Gherkin / BDD Testing - -Write behavior-driven tests using Cucumber-style Given/When/Then syntax. 
- -### Inline DSL - -Define features directly in Gleam—no `.feature` files needed: - ```gleam -import dream_test/assertions/should.{succeed} -import dream_test/gherkin/feature.{feature, scenario, given, when, then} -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} +import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} +import dream_test/gherkin/parser +import dream_test/gherkin/steps.{type StepContext, get_int, step} import dream_test/gherkin/world.{get_or, put} -import dream_test/types.{type AssertionResult} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import gleam/result -fn step_have_items(context: StepContext) -> AssertionResult { +fn step_have_items(context: StepContext) { put(context.world, "cart", get_int(context.captures, 0) |> result.unwrap(0)) - succeed() + Ok(succeed()) } -fn step_add_items(context: StepContext) -> AssertionResult { +fn step_add_items(context: StepContext) { let current = get_or(context.world, "cart", 0) - let to_add = get_int(context.captures, 0) |> result.unwrap(0) - put(context.world, "cart", current + to_add) - succeed() + put(context.world, "cart", current + { get_int(context.captures, 0) |> result.unwrap(0) }) + Ok(succeed()) } -fn step_should_have(context: StepContext) -> AssertionResult { - let expected = get_int(context.captures, 0) |> result.unwrap(0) +fn step_verify_count(context: StepContext) { get_or(context.world, "cart", 0) - |> should() - |> equal(expected) + |> should + |> be_equal(get_int(context.captures, 0) |> result.unwrap(0)) |> or_fail_with("Cart count mismatch") } pub fn tests() { let steps = - new_registry() + steps.new() |> step("I have {int} items in my cart", step_have_items) |> step("I add {int} more items", step_add_items) - |> step("I should have {int} items total", step_should_have) - - feature("Shopping Cart", steps, [ - scenario("Adding items to cart", [ - given("I have 3 items in my cart"), - when("I add 2 more items"), - then("I should 
have 5 items total"), - ]), - ]) -} -``` + |> step("I should have {int} items total", step_verify_count) -``` -Feature: Shopping Cart - Scenario: Adding items to cart ✓ (3ms) - -1 scenario (1 passed) in 3ms -``` - -🧪 [Tested source](examples/snippets/test/gherkin_hero.gleam) - -### .feature File Support - -Parse standard Gherkin `.feature` files: - -```gherkin -# test/cart.feature -@shopping -Feature: Shopping Cart - As a customer I want to add items to my cart - - Background: - Given I have an empty cart - - @smoke - Scenario: Adding items - When I add 3 items - Then the cart should have 3 items -``` - -🧪 [Tested source](examples/snippets/test/cart.feature) - -```gleam -import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} -import dream_test/gherkin/parser - -pub fn tests() { - let steps = new_registry() |> register_steps() - - // Parse the .feature file - let assert Ok(feature) = parser.parse_file("test/cart.feature") - - // Convert to TestSuite - let config = FeatureConfig(feature: feature, step_registry: steps) - to_test_suite("cart_test", config) + let assert Ok(feature) = parser.parse_file("test/shopping_cart.feature") + to_test_suite(FeatureConfig(feature: feature, step_registry: steps)) } ``` -🧪 [Tested source](examples/snippets/test/gherkin_file.gleam) - -### Step Placeholders - -Capture values from step text using typed placeholders: +🧪 [Tested source](examples/snippets/test/snippets/gherkin/gherkin_file.gleam) · 📖 [Guide](documentation/10-gherkin-bdd.md) -| Placeholder | Matches | Example | -| ----------- | -------------------- | --------------- | -| `{int}` | Integers | `42`, `-5` | -| `{float}` | Decimals | `3.14`, `-0.5` | -| `{string}` | Quoted strings | `"hello world"` | -| `{word}` | Single unquoted word | `alice` | - -Numeric placeholders work with prefixes/suffixes—`${float}` matches `$19.99` and captures `19.99`: +## Gherkin Syntax in Gleam ```gleam -fn step_have_balance(context: StepContext) -> AssertionResult { - // {float} captures 
the numeric value (even with $ prefix) - let balance = get_float(context.captures, 0) |> result.unwrap(0.0) - put(context.world, "balance", balance) - succeed() -} - -pub fn register(registry: StepRegistry) -> StepRegistry { - registry - |> step("I have a balance of ${float}", step_have_balance) - |> step("I withdraw ${float}", step_withdraw) - |> step("my balance should be ${float}", step_balance_is) -} -``` - -🧪 [Tested source](examples/snippets/test/gherkin_step_handler.gleam) - -### Background & Tags - -Use `background` for shared setup and `with_tags` for filtering: - -```gleam -import dream_test/gherkin/feature.{ - background, feature_with_background, scenario, with_tags, -} +import dream_test/gherkin/feature.{feature, given, scenario, then, when} +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import gleam/result pub fn tests() { - let bg = background([given("I have an empty cart")]) + let steps = + steps.new() + |> step("I have {int} items", fn(ctx: StepContext) { + put(ctx.world, "cart", get_int(ctx.captures, 0) |> result.unwrap(0)) + Ok(succeed()) + }) + |> step("I add {int} more", fn(ctx: StepContext) { + let current = get_or(ctx.world, "cart", 0) + put(ctx.world, "cart", current + { get_int(ctx.captures, 0) |> result.unwrap(0) }) + Ok(succeed()) + }) + |> step("I should have {int} items", fn(ctx: StepContext) { + get_or(ctx.world, "cart", 0) + |> should |> be_equal(get_int(ctx.captures, 0) |> result.unwrap(0)) + |> or_fail_with("Cart count mismatch") + }) - feature_with_background("Shopping Cart", steps, bg, [ + feature("Shopping Cart", steps, [ scenario("Adding items", [ - when("I add 3 items"), - then("I should have 3 items"), - ]) - |> with_tags(["smoke"]), - scenario("Adding more items", [ - when("I add 2 items"), - and("I add 3 items"), + given("I have 3 items"), + when("I add 2 more"), then("I should have 5 
items"), ]), ]) } ``` -🧪 [Tested source](examples/snippets/test/gherkin_feature.gleam) - -### Feature Discovery - -Load multiple `.feature` files with glob patterns: - -```gleam -import dream_test/gherkin/discover - -pub fn tests() { - let steps = new_registry() |> register_steps() - - // Discover and load all .feature files - discover.features("test/**/*.feature") - |> discover.with_registry(steps) - |> discover.to_suite("my_features") -} -``` - -🧪 [Tested source](examples/snippets/test/gherkin_discover.gleam) - -Supported glob patterns: - -| Pattern | Matches | -| -------------------- | ----------------------------------------- | -| `features/*.feature` | All `.feature` files in `features/` | -| `test/**/*.feature` | Recursive search in `test/` | -| `*.feature` | All `.feature` files in current directory | - -### Parallel Execution - -Gherkin scenarios run in parallel like all other tests. Each scenario gets its own isolated World state, but external resources (databases, servers) are shared. See [Shared Resource Warning](#shared-resource-warning) for guidance on handling shared state. 
- -### Full Example - -See [examples/shopping_cart](examples/shopping_cart) for a complete Gherkin BDD example with: - -- Inline DSL features ([test/features/shopping_cart.gleam](examples/shopping_cart/test/features/shopping_cart.gleam)) -- `.feature` file ([features/shopping_cart.feature](examples/shopping_cart/features/shopping_cart.feature)) -- Step definitions ([test/steps/](examples/shopping_cart/test/steps/)) -- Application code ([src/shopping_cart/](examples/shopping_cart/src/shopping_cart/)) - ---- - -## BEAM-Powered Test Isolation - -Every test runs in its own lightweight BEAM process—this is what makes Dream Test fast: - -| Feature | What it means | -| ---------------------- | ------------------------------------------------------------ | -| **Parallel execution** | Tests run concurrently; 207 tests complete in ~300ms | -| **Crash isolation** | A `panic` in one test doesn't affect others | -| **Timeout handling** | Slow tests get killed; suite keeps running | -| **Per-test timing** | See exactly how long each test takes | -| **Automatic cleanup** | Resources linked to the test process are freed automatically | - -```gleam -// This test crashes, but others keep running -it("handles edge case", fn() { - panic as "oops" // Other tests still execute and report -}) - -// This test hangs, but gets killed after timeout -it("fetches data", fn() { - infinite_loop() // Killed after 5 seconds (default) -}) -``` - -### Configuring execution - -```gleam -import dream_test/runner.{run_all_with_config, RunnerConfig} - -let config = RunnerConfig( - max_concurrency: 8, - default_timeout_ms: 10_000, -) - -let test_cases = to_test_cases("my_test", tests()) -run_all_with_config(config, test_cases) -|> report(io.print) -``` - -🧪 [Tested source](examples/snippets/test/runner_config.gleam) - -### Shared Resource Warning - -⚠️ **Tests share external resources.** Each test runs in its own BEAM process with isolated memory, but databases, servers, file systems, and APIs are 
shared. - -If your tests interact with shared resources, either: - -1. **Isolate resources per test** — unique database names, separate ports, temp directories -2. **Limit concurrency** — set `max_concurrency: 1` for sequential execution - -```gleam -// Sequential execution for tests with shared state -let config = RunnerConfig(max_concurrency: 1, default_timeout_ms: 30_000) -run_all_with_config(config, test_cases) -``` - -🧪 [Tested source](examples/snippets/test/sequential_execution.gleam) - ---- - -## Tagging, CI & Reporters - -### Tagging and filtering - -Add tags to tests for selective execution: - -```gleam -import dream_test/unit.{describe, it, with_tags} - -describe("Calculator", [ - it("adds numbers", fn() { ... }) - |> with_tags(["unit", "fast"]), - it("complex calculation", fn() { ... }) - |> with_tags(["integration", "slow"]), -]) -``` - -Filter which tests run via `RunnerConfig.test_filter`: - -```gleam -import dream_test/runner.{RunnerConfig, run_all_with_config} -import gleam/list - -let config = RunnerConfig( - max_concurrency: 4, - default_timeout_ms: 5000, - test_filter: Some(fn(c) { list.contains(c.tags, "unit") }), -) - -test_cases |> run_all_with_config(config) -``` - -The filter is a predicate function receiving `SingleTestConfig`, so you can filter by tags, name, or any other field. You control how to populate the filter—from environment variables, CLI args, or hardcoded for debugging. - -| Use case | Filter example | -| ------------------ | ------------------------------------------ | -| Run tagged "unit" | `fn(c) { list.contains(c.tags, "unit") }` | -| Exclude "slow" | `fn(c) { !list.contains(c.tags, "slow") }` | -| Match name pattern | `fn(c) { string.contains(c.name, "add") }` | -| Run all (default) | `None` | - -For Gherkin scenarios, use `dream_test/gherkin/feature.with_tags` instead. 
- -### CI integration - -Use `exit_on_failure` to ensure your CI pipeline fails when tests fail: - -```gleam -import dream_test/runner.{exit_on_failure, run_all} - -pub fn main() { - to_test_cases("my_test", tests()) - |> run_all() - |> report(io.print) - |> exit_on_failure() // Exits with code 1 if any tests failed -} -``` - -| Result | Exit Code | -| ------------------------------------------------ | --------- | -| All tests passed | 0 | -| Any test failed, timed out, or had setup failure | 1 | - -🧪 [Tested source](examples/snippets/test/quick_start.gleam) - -### JSON reporter - -Output test results as JSON for CI/CD integration, test aggregation, or tooling: - -```gleam -import dream_test/reporter/json -import dream_test/reporter/bdd.{report} - -pub fn main() { - to_test_cases("my_test", tests()) - |> run_all() - |> report(io.print) // Human-readable to stdout - |> json.report(write_to_file) // JSON to file - |> exit_on_failure() -} -``` - -The JSON output includes system info, timing, and detailed failure data: - -```json -{ - "version": "1.0", - "timestamp_ms": 1733151045123, - "duration_ms": 315, - "system": { "os": "darwin", "otp_version": "27", "gleam_version": "0.67.0" }, - "summary": { "total": 3, "passed": 2, "failed": 1, ... }, - "tests": [ - { - "name": "adds numbers", - "full_name": ["Calculator", "add", "adds numbers"], - "status": "passed", - "duration_ms": 2, - "kind": "unit", - "failures": [] - } - ] -} -``` - -🧪 [Tested source](examples/snippets/test/json_reporter.gleam) - ---- - -## How It Works - -Dream_test uses an explicit pipeline—no hidden globals, no magic test discovery. - -### Flat Mode (most common) - -``` -describe/it → to_test_cases → run_all → report - (DSL) (flatten) (execute) (format) -``` - -1. **Define** tests with `describe`/`it` — builds a test tree -2. **Convert** with `to_test_cases` — flattens to runnable cases -3. **Run** with `run_all` — executes in parallel with isolation -4. 
**Report** with your choice of formatter — outputs results - -### Suite Mode (for `before_all`/`after_all`) - -``` -describe/it → to_test_suite → run_suite → report - (DSL) (preserve) (execute) (format) -``` - -Suite mode preserves the group hierarchy so hooks can run at group boundaries. - -### Under the Hood - -Each test runs in its own BEAM process: - -```mermaid -flowchart TB - runner[Test Runner] - runner --> t1[Test 1] - runner --> t2[Test 2] - runner --> t3[Test 3] - runner --> t4[Test 4] - t1 --> collect[Collect Results] - t2 --> collect - t3 --> collect - t4 --> collect - collect --> report[Report] -``` - -Benefits: - -- A crashing test doesn't affect others -- Timeouts are enforced via process killing -- Resources linked to test processes are cleaned up automatically - ---- - -## Documentation - -| Document | Audience | -| --------------------------------------------- | --------------------------- | -| **[Hexdocs](https://hexdocs.pm/dream_test/)** | API reference with examples | -| **[CONTRIBUTING.md](CONTRIBUTING.md)** | How to contribute | -| **[STANDARDS.md](STANDARDS.md)** | Coding conventions | - ---- - -## Status - -**Stable** — v1.2 release. API is stable and ready for production use. 
- -| Feature | Status | -| --------------------------------- | --------- | -| Core DSL (`describe`/`it`/`skip`) | ✅ Stable | -| Lifecycle hooks | ✅ Stable | -| Assertions (`should.*`) | ✅ Stable | -| Snapshot testing | ✅ Stable | -| BDD Reporter | ✅ Stable | -| JSON Reporter | ✅ Stable | -| Parallel execution | ✅ Stable | -| Process isolation | ✅ Stable | -| Crash handling | ✅ Stable | -| Timeout handling | ✅ Stable | -| Per-test timing | ✅ Stable | -| CI exit codes | ✅ Stable | -| Polling helpers | ✅ Stable | -| Gherkin/Cucumber BDD | ✅ Stable | -| Tagging & filtering | ✅ Stable | - ---- - -## Contributing - -```sh -git clone https://github.com/TrustBound/dream_test -cd dream_test -make all # build, test, format -``` - -See [CONTRIBUTING.md](CONTRIBUTING.md) for development workflow and guidelines. - ---- - -## License - -MIT — see [LICENSE.md](LICENSE.md) +🧪 [Tested source](examples/shopping_cart/test/features/shopping_cart.gleam) · 📖 [Guide](documentation/10-gherkin-bdd.md) ---
- Built in Gleam, on the BEAM, by the Dream Team ❤️ + Built in Gleam, on the BEAM, by the Dream Team.
diff --git a/STANDARDS.md b/STANDARDS.md index e796167..dcb93cd 100644 --- a/STANDARDS.md +++ b/STANDARDS.md @@ -159,18 +159,18 @@ pub fn add_failure(context, failure) { | ❌ Avoid | ✅ Use instead | | ------------------- | ----------------------- | -| `dream_test/assert` | `dream_test/assertions` | +| `dream_test/assert` | `dream_test/matchers` | | `dream_test/type` | `dream_test/types` | **Module path conventions**: ``` dream_test/context # Per-test state -dream_test/assertions/should # Assertion API +dream_test/matchers # Matcher API dream_test/types # Shared data types dream_test/runner # Test execution dream_test/unit # describe/it DSL -dream_test/reporter/bdd # Output formatting +dream_test/reporters/bdd # Output formatting ``` --- @@ -185,7 +185,7 @@ dream_test/reporter/bdd # Output formatting ```gleam value -|> should.equal(expected) +|> should.be_equal(expected) |> or_fail_with("message") ``` @@ -193,8 +193,8 @@ value ```gleam value -|> should() -|> equal(expected) +|> should +|> be_equal(expected) |> or_fail_with("message") ``` @@ -204,17 +204,17 @@ value **Rule**: Use unqualified imports for DSL functions and piped helpers. Use qualified references when the namespace adds clarity. -**Why**: `should()`, `equal()`, `or_fail_with()` read better unqualified in pipes. Module prefixes add noise for frequently-used functions. +**Why**: `should`, `equal()`, `or_fail_with()` read better unqualified in pipes. Module prefixes add noise for frequently-used functions. 
### Importing values ```gleam // ✅ Good: Unqualified for pipe-friendly DSL import dream_test/unit.{describe, it} -import dream_test/assertions/should.{should, equal, or_fail_with} +import dream_test/matchers.{should, be_equal, or_fail_with} // ❌ Bad: Redundant alias -import dream_test/assertions/should as should +import dream_test/matchers as should ``` ### Importing types diff --git a/documentation/01-installation.md b/documentation/01-installation.md new file mode 100644 index 0000000..f4cf1c4 --- /dev/null +++ b/documentation/01-installation.md @@ -0,0 +1,78 @@ +## Installation + +Installation is deliberately boring. The goal is not “get the fanciest setup,” it’s: + +- Get one test running locally +- Get one test running in CI +- Keep the runner **explicit**, so future changes don’t surprise you + +### Add the dependency (as dev-only) + +Add Dream Test as a **dev dependency** in your `gleam.toml` using `gleam add`: + +```shell +gleam add --dev dream_test +``` + +Why dev-dependency? Tests are a build-time concern. Keeping it in `[dev-dependencies]` makes it clear Dream Test is not part of your runtime application surface area. + +### Run tests locally (what command you should use) + +This repo (and the examples) use a Makefile. If you’re in this repo: + +```sh +make test +``` + +In your own project, you typically run: + +```sh +gleam test +``` + +**Note:** `gleam test` runs the test runner module at `test/{project_name}_test.gleam` (where `project_name` matches the `name` in your `gleam.toml`). This file must define `pub fn main()`. See the `gleeunit` docs for the standard runner shape. ([hexdocs.pm/gleeunit](https://hexdocs.pm/gleeunit/index.html)) + +### Required: a test runner module (`pub fn main()`) + +Dream Test requires an explicit runner module. This is a design choice: + +- **No hidden global state**: the runner is just Gleam code you can read. +- **No surprising defaults**: your code chooses concurrency, timeouts, reporters, and CI behavior. 
+- **Better “why did this fail?” debugging**: you can add logging or swap reporters without rewriting tests.
+
+Create a file under `test/` (for example, `test/{project_name}_test.gleam`) with a `pub fn main()`.
+
+You can use module discovery to avoid maintaining an import list.
+
+```gleam
+import dream_test/discover.{from_path, to_suites}
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run}
+
+pub fn main() {
+  let suites =
+    discover.new()
+    |> from_path("unit/**_test.gleam")
+    |> to_suites()
+
+  runner.new(suites)
+  |> progress_reporter(progress.new())
+  |> results_reporters([bdd.new()])
+  |> exit_on_failure()
+  |> run()
+}
+```
+
+🧪 [Tested source](../examples/snippets/test/snippets/runner/discovery_runner.gleam)
+
+### What you just set up
+
+- `discover` turns files into suites (a convenience).
+- `runner` executes suites with isolation, timeouts, and configurable parallelism.
+- `reporters` decide how results are rendered (human output vs JSON vs more).
+
+### What's Next?
+
+- Go back to [Documentation README](README.md)
+- Continue to [Quick Start](02-quick-start.md) to write a first passing test and see the output you’ll build on.
diff --git a/documentation/02-quick-start.md b/documentation/02-quick-start.md
new file mode 100644
index 0000000..895e083
--- /dev/null
+++ b/documentation/02-quick-start.md
@@ -0,0 +1,108 @@
+## Quick Start
+
+When you’re adopting Dream Test, the first goal is simple: **write one passing test, run it, and see readable output**.
+
+Dream Test keeps the runner explicit on purpose.
Instead of tests “just running” because a file exists or was imported, you build **suite values** and pass them to a tiny runner module where you decide: + +- What suites to run +- What output to produce +- How CI should behave on failure + +That explicitness is the source of most of Dream Test’s reliability: when a test run surprises you, there’s always a concrete `main()` you can inspect. + +If you’re looking for a mental model of “how does the runner find my tests?”: in Dream Test, **`main()` chooses what runs** by passing suites to the runner. You can list suites explicitly, or generate the list via discovery. + +### Choose your first runner style + +There are two good starting points: + +- **Discovery**: avoid maintaining an import list. +- **Explicit suites**: simple and easy to reason about. + +### Option A: the smallest useful setup (discovery) + +```gleam +import dream_test/discover.{from_path, to_suites} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} + +pub fn main() { + let suites = + discover.new() + |> from_path("unit/**_test.gleam") + |> to_suites() + + runner.new(suites) + |> progress_reporter(progress.new()) + |> results_reporters([bdd.new()]) + |> exit_on_failure() + |> run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/runner/discovery_runner.gleam) + +What’s happening here (in English): + +- `from_path("unit/**_test.gleam")` finds test modules on disk. +- `to_suites()` turns them into suite values. +- The runner executes those suites and streams output via a reporter. + +### Option B: explicit suites (simple and easy to reason about) + +This is the most “teachable” version because nothing is implicit: `tests()` returns a suite, and `main()` runs it. 
+ +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/string.{contains, trim} + +pub fn tests() { + describe("String utilities", [ + it("trims whitespace", fn() { + let actual = " hello " |> trim() + + actual + |> should + |> be_equal("hello") + |> or_fail_with("Should remove surrounding whitespace") + }), + it("finds substrings", fn() { + let has_world = "hello world" |> contains("world") + + has_world + |> should + |> be_equal(True) + |> or_fail_with("Should find 'world' in string") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/quick_start.gleam) + +### Why this shape? + +- **`tests()` is your suite**: it describes behavior. It should be boring to call and easy to reuse. +- **`main()` is policy**: it decides how you want output and how strict CI should be. +- **Assertions are pipes**: you start from a value, apply matchers, and end with a message you’ll be happy to see in logs. + +If you only copy one idea from Dream Test, copy this one: always end an assertion chain with `or_fail_with("...")`. That message becomes the breadcrumb you’ll use when debugging. + +### What's Next? + +- Go back to [Installation](01-installation.md) +- Go back to [Documentation README](README.md) +- Continue to [Writing unit tests](03-writing-tests.md) to get comfortable with `describe`, `it`, grouping, skipping, and tags. 
diff --git a/documentation/03-writing-tests.md b/documentation/03-writing-tests.md new file mode 100644 index 0000000..1a3f644 --- /dev/null +++ b/documentation/03-writing-tests.md @@ -0,0 +1,258 @@ +## Writing unit tests (`describe`, `it`, `group`, `skip`, tags) + +Most of the time, you want tests that read like a conversation with the code: + +- “Here’s the behavior I’m testing” (`describe`) +- “Here’s one concrete thing that should be true” (`it`) + +Dream Test’s unit DSL is built for that style, but there’s a deeper design goal behind the surface syntax: + +- **Suites are just values** you can build, pass around, and run explicitly. +- **Your test module stays ordinary Gleam** (no hidden discovery side-effects). +- **Failures should read well** (because tests are communication, not just verification). + +### `describe` + `it` (the core loop) + +```gleam +import dream_test/matchers.{ + be_equal, be_error, be_ok, or_fail_with, should, +} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import snippets.{add, divide} + +pub fn tests() { + describe("Calculator", [ + it("adds two numbers", fn() { + add(2, 3) + |> should + |> be_equal(5) + |> or_fail_with("2 + 3 should equal 5") + }), + it("handles division", fn() { + divide(10, 2) + |> should + |> be_ok() + |> be_equal(5) + |> or_fail_with("10 / 2 should equal 5") + }), + it("returns error for division by zero", fn() { + divide(1, 0) + |> should + |> be_error() + |> or_fail_with("Division by zero should error") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/hero.gleam) + +### How to think about `describe` and `it` + +Treat the test structure (nested describes/groups/tests) as documentation: + +- Use 
**`describe("Thing", [...])`** to name the unit of behavior you’re testing (a module, type, feature, capability). +- Use **short `it` names** that describe the outcome (“returns error for division by zero”), not the implementation (“calls divide with 0”). +- Keep `it` bodies small: arrange → act → assert. +- If setup gets noisy, prefer **named helpers** first. Reach for hooks when you truly need cross-cutting setup/teardown (see the lifecycle chapter). + +### `skip` (keep the test, don’t run it) + +Use `skip` when you want to keep the test structure and body around, but temporarily disable execution. + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it, skip} +import snippets.{add} + +pub fn tests() { + describe("Skipping tests", [ + it("runs normally", fn() { + add(2, 3) + |> should + |> be_equal(5) + |> or_fail_with("2 + 3 should equal 5") + }), + skip("not implemented yet", fn() { + // This test is skipped - the body is preserved but not executed + add(100, 200) + |> should + |> be_equal(300) + |> or_fail_with("Should add large numbers") + }), + it("also runs normally", fn() { + add(0, 0) + |> should + |> be_equal(0) + |> or_fail_with("0 + 0 should equal 0") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/skipping_tests.gleam) + +Why `skip` exists (beyond “turn it off”): + +- It keeps intent close to the code (“we know this should exist, but it’s not ready”). +- It avoids deleting tests (which often deletes context and makes regressions easier). +- It’s explicit: you still see it in the suite structure and output. 
+ +### `group` (structure inside a `describe`) + +Use `group` when you want nested structure inside a suite: a second level of narrative under a `describe`. + +The most common reason to use `group` is to scope hooks (setup/teardown) to a subset of tests. Even if you don’t use hooks, `group` can make long suites easier to skim. + +Here’s the minimal shape: + +```gleam +import dream_test/matchers.{succeed} +import dream_test/unit.{describe, group, it} + +pub fn tests() { + describe("Thing", [ + group("Case A", [ + it("does one thing", fn() { Ok(succeed()) }), + ]), + group("Case B", [ + it("does another thing", fn() { Ok(succeed()) }), + ]), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/grouping.gleam) + +You’ll also see `group` used in the hook inheritance example (outer hooks apply to inner groups): + +```gleam +import dream_test/matchers.{succeed} +import dream_test/unit.{after_each, before_each, describe, group, it} +import gleam/io + +pub fn tests() { + describe("Outer", [ + before_each(fn() { io.println("outer setup") Ok(Nil) }), + after_each(fn() { io.println("outer teardown") Ok(Nil) }), + group("Inner", [ + before_each(fn() { io.println("inner setup") Ok(Nil) }), + after_each(fn() { io.println("inner teardown") Ok(Nil) }), + it("test", fn() { Ok(succeed()) }), + ]), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/hooks/hook_inheritance.gleam) + +### Tags (when you need a “slice” of a suite) + +Tags are lightweight labels you can attach to tests and groups. Use them to slice a run (“smoke”, “slow”, “integration”) or to filter/process results in your own tooling and CI. + +What you can tag in unit tests: + +- A **test** (an `it(...)` or `skip(...)` node) +- A **group** (a `group(...)` node) + +Tags on a group apply to all tests inside that group (including nested groups). + +Why tags exist: + +- They let you annotate intent (“slow”, “integration”, “smoke”) without encoding that in names. 
+- They’re structured data that tools and reporters can use without parsing strings. + +This repo also uses tags heavily in Gherkin specs (see the Gherkin guide), where tags live on scenarios/features. + +### Filtering by tag + +You can use tags to run only a subset of tests. + +```gleam +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/types.{AssertionOk} +import dream_test/unit.{describe, it, with_tags} +import gleam/list + +fn is_smoke(info: runner.TestInfo) -> Bool { + list.contains(info.tags, "smoke") +} + +pub fn main() { + let suite = + describe("Tagged tests", [ + it("smoke: fast", fn() { Ok(AssertionOk) }) |> with_tags(["smoke"]), + it("not smoke", fn() { Ok(AssertionOk) }), + ]) + + runner.new([suite]) + |> runner.filter_tests(is_smoke) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +```gleam +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, group, it, with_tags} + +pub fn tests() { + describe("Tagged tests", [ + // Tag a whole group (all tests inside inherit these tags) + group("integration", [ + it("slow path", fn() { Ok(succeed()) }), + it("another slow path", fn() { Ok(succeed()) }), + ]) + |> with_tags(["integration", "slow"]), + + // Or tag a single test + it("smoke", fn() { Ok(succeed()) }) + |> with_tags(["smoke"]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/tagging.gleam) + +### What's Next? 
+ +- Go back to [Quick Start](02-quick-start.md) +- Go back to [Documentation README](README.md) +- Continue to [Context-aware unit tests](04-context-aware-tests.md) if your setup produces a value you want to pass into every test (DB handle, client, scenario state). diff --git a/documentation/04-context-aware-tests.md b/documentation/04-context-aware-tests.md new file mode 100644 index 0000000..d9f8b5a --- /dev/null +++ b/documentation/04-context-aware-tests.md @@ -0,0 +1,184 @@ +## Context-aware unit tests (`dream_test/unit_context`) + +`unit_context` is for the cases where you want to **pass a shared value into every test** (and have hooks transform it). + +If you’ve ever built a DB handle, HTTP client, fixture, or “scenario state” and wished you could thread it through tests cleanly, this is the tool. + +Use `dream_test/unit_context` when you want hooks and tests to operate on a shared, strongly-typed **context value** that you control. + +### `unit` vs `unit_context`: same DSL names, different data flow + +`dream_test/unit_context` intentionally mirrors the `dream_test/unit` DSL. You’ll see the **same function names** (`describe`, `it`, `group`, hooks like `before_each`, etc.) so you don’t have to learn a second vocabulary. + +What changes is the **shape of the suite**: + +- In `unit`, `describe("name", [...])` builds a suite where `it("name", fn() { ... })` has no context parameter. +- In `unit_context`, `describe("name", seed, [...])` builds a suite where `it("name", fn(context) { ... })` receives the current context value. + +In practice, “switching” between them is mostly swapping the import: + +```gleam +import dream_test/unit.{describe, it} +// vs +import dream_test/unit_context.{describe, it} +``` + +Tip: avoid importing _both_ sets of unqualified names in the same module—`describe`/`it` would collide. Pick one DSL per test module. 
+ +This is the right tool when: + +- Your setup produces values you want to pass into the test body (DB handles, fixtures, clients). +- You want to model “state” explicitly and type-safely (instead of storing it in globals or rebuilding it in every test). +- You want hooks to _transform_ the context for each test. + +If you don’t need an explicit context, prefer `dream_test/unit` — it’s simpler. + +### The idea: context flows through the suite + +- You give `describe` an initial `seed` value. +- `before_all` / `before_each` can transform that context. +- Each `it` receives the current context. + +### Why the seed exists + +The `seed` might look redundant at first (“why not let `before_all` create the context?”), but it’s doing an important job: it makes the **context type known when the suite is built**, before any tests run. + +That matters in Gleam because: + +- The test structure is built as ordinary data, and its parts (`before_each`, `it`, etc.) are typed with a concrete `context` type. +- Hooks run at test time, but the compiler needs the context type at compile time so `it("...", fn(context) { ... })` is type-checked correctly. +- Practically: it keeps context-aware suites simple and predictable—no special “first hook defines the context” rule, and no loss of type-safety. + +This is the “make dependencies explicit” version of hooks: instead of setup functions writing to global state (or relying on process dictionaries), the setup returns a value, and that value is passed into the test body. 
+ +### A minimal example: counter context + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, it} + +pub type Ctx { + Ctx(counter: Int) +} + +fn increment(ctx: Ctx) { + Ok(Ctx(counter: ctx.counter + 1)) +} + +pub fn suite() { + describe("Context-aware suite", Ctx(counter: 0), [ + before_each(increment), + it("receives the updated context", fn(ctx: Ctx) { + ctx.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 after before_each") + }), + // Hook can be repeated; each applies to subsequent tests. + before_each(increment), + it("sees hook effects for subsequent tests", fn(ctx: Ctx) { + ctx.counter + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2 after two before_each hooks") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/hooks/context_aware_tests.gleam) + +### Grouping + hook scoping (`group`) + +`unit_context.group` is the same idea as `unit.group`: it lets you nest structure, and it scopes hooks. + +- Hooks declared in an **outer scope** apply inside nested groups. +- Hooks declared in an **inner group** apply only to tests in that group. + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, group, it} + +pub type Ctx { + Ctx(counter: Int) +} + +fn inc(ctx: Ctx) { + Ok(Ctx(counter: ctx.counter + 1)) +} + +pub fn suite() { + describe("Context-aware grouping", Ctx(counter: 0), [ + // This outer hook applies everywhere under this describe, including groups. 
+ before_each(inc), + + group("inner group", [ + // This hook only applies to tests inside this group. + before_each(inc), + + it("sees both outer + inner hooks", fn(ctx: Ctx) { + ctx.counter + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2 (outer + inner before_each)") + }), + ]), + + it("sees only outer hook", fn(ctx: Ctx) { + ctx.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 (outer before_each only)") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/hooks/context_aware_grouping.gleam) + +### Important Gleam detail: when type inference needs help + +In a context-aware test, you’ll often access record fields like `ctx.counter` or `context.world`. +Gleam can only allow record-field access when it knows the record type, so sometimes you need a minimal type hint: + +- `fn my_step(context: StepContext) { ... context.world ... }` + +That’s not “extra ceremony” — it’s the smallest annotation needed for record field access. + +### When to prefer plain `unit` instead + +`unit_context` is great when setup produces something you want to reuse. But it’s not “better” by default. + +Prefer `dream_test/unit` when: + +- Your setup is tiny and reads well inline. +- You don’t need hooks to transform shared state. +- The context would just become another “thing you have to understand” without providing leverage. + +### What's Next? 
+ +- Go back to [Writing unit tests](03-writing-tests.md) +- Go back to [Documentation README](README.md) +- Continue to [Assertions & matchers](05-assertions-and-matchers.md) diff --git a/documentation/05-assertions-and-matchers.md b/documentation/05-assertions-and-matchers.md new file mode 100644 index 0000000..d9ef0b9 --- /dev/null +++ b/documentation/05-assertions-and-matchers.md @@ -0,0 +1,297 @@ +## Assertions & matchers (the `should` pipeline) + +If you’ve used Jest/RSpec style assertions before, this is the Dream Test equivalent — but pipe-first and composable. + +Dream Test assertions are designed around a single, composable pattern: + +```gleam +value +|> should +|> matcher(...) +|> or_fail_with("human-friendly message") +``` + +Read it as a pipeline (top-to-bottom): + +- Start from the value you’re checking. +- `should` starts an assertion chain. +- Each matcher either confirms something (“be_equal”) or unwraps something (“be_ok”, “be_some”). +- `or_fail_with(...)` attaches the message you’ll see when this fails. + +### Why this pattern? + +- **No macros, no hidden magic**: everything is ordinary Gleam code. +- **Composable**: matchers can unwrap values (like `Option`/`Result`) and pass the unwrapped value onward. +- **Consistent failures**: failures are structured values that reporters can format well. + +There’s also a human reason: + +- Assertions become part of your test’s narrative. A good pipeline reads like a sentence and fails with a message that tells you what matters. 
+ +### Chaining matchers (unwrap + assert) + +```gleam +import dream_test/matchers.{be_equal, be_ok, be_some, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/option.{Some} + +pub fn tests() { + describe("Chaining matchers", [ + // Unwrap Some, then check the value + it("unwraps Option", fn() { + Some(42) + |> should + |> be_some() + |> be_equal(42) + |> or_fail_with("Should contain 42") + }), + // Unwrap Ok, then check the value + it("unwraps Result", fn() { + Ok("success") + |> should + |> be_ok() + |> be_equal("success") + |> or_fail_with("Should be Ok with 'success'") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/chaining.gleam) + +### Built-in matcher catalogue (practical examples) + +```gleam +import dream_test/matchers.{ + be_between, be_equal, be_false, be_ok, be_some, be_true, contain, + contain_string, have_length, match_regex, or_fail_with, should, +} +import dream_test/unit.{describe, it} +import gleam/option.{Some} + +pub fn tests() { + describe("Built-in matchers", [ + it("boolean: be_true", fn() { + True + |> should + |> be_true() + |> or_fail_with("expected True") + }), + + it("boolean: be_false", fn() { + False + |> should + |> be_false() + |> or_fail_with("expected False") + }), + + it("option: be_some + equal", fn() { + Some(42) + |> should + |> be_some() + |> be_equal(42) + |> or_fail_with("expected Some(42)") + }), + + it("result: be_ok + equal", fn() { + Ok("hello") + |> should + |> be_ok() + |> be_equal("hello") + |> or_fail_with("expected Ok(\"hello\")") + }), + + it("collection: have_length", fn() { + [1, 2, 3] + |> should + |> have_length(3) + |> or_fail_with("expected list length 3") + }), + + 
it("collection: contain", fn() { + [1, 2, 3] + |> should + |> contain(2) + |> or_fail_with("expected list to contain 2") + }), + + it("comparison: be_between", fn() { + 5 + |> should + |> be_between(1, 10) + |> or_fail_with("expected 5 to be between 1 and 10") + }), + + it("string: contain_string", fn() { + "hello world" + |> should + |> contain_string("world") + |> or_fail_with("expected substring match") + }), + + it("string: match_regex", fn() { + "user-123" + |> should + |> match_regex("^user-\\d+$") + |> or_fail_with("expected an id like user-123") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/matchers/builtin_matchers.gleam) + +### Built-in matchers (table) + +All built-in matchers are re-exported from `dream_test/matchers` and are designed to be used in the `should |> matcher(...) |> or_fail_with(...)` pipeline. + +| Category | Matcher | What it asserts / does | +| ---------------------- | ------------------------------ | -------------------------------------------------------------------- | +| **Equality** | `be_equal(expected)` | Structural equality (`==`). | +| **Equality** | `not_equal(unexpected)` | Structural inequality (`!=`). | +| **Boolean** | `be_true()` | Value is `True`. | +| **Boolean** | `be_false()` | Value is `False`. | +| **Option** | `be_some()` | Value is `Some(_)` and **unwraps** to the inner value for chaining. | +| **Option** | `be_none()` | Value is `None`. | +| **Result** | `be_ok()` | Value is `Ok(_)` and **unwraps** to the `Ok` value for chaining. | +| **Result** | `be_error()` | Value is `Error(_)` and **unwraps** to the error value for chaining. | +| **Collections (List)** | `contain(item)` | List contains `item`. | +| **Collections (List)** | `not_contain(item)` | List does not contain `item`. | +| **Collections (List)** | `have_length(n)` | List length is exactly `n`. | +| **Collections (List)** | `be_empty()` | List is empty (`[]`). | +| **Comparison (Int)** | `be_greater_than(n)` | Value is `> n`. 
| +| **Comparison (Int)** | `be_less_than(n)` | Value is `< n`. | +| **Comparison (Int)** | `be_at_least(n)` | Value is `>= n`. | +| **Comparison (Int)** | `be_at_most(n)` | Value is `<= n`. | +| **Comparison (Int)** | `be_between(min, max)` | Value is strictly between: `min < value < max`. | +| **Comparison (Int)** | `be_in_range(min, max)` | Value is in inclusive range: `min <= value <= max`. | +| **Comparison (Float)** | `be_greater_than_float(n)` | Value is `> n`. | +| **Comparison (Float)** | `be_less_than_float(n)` | Value is `< n`. | +| **String** | `start_with(prefix)` | String starts with `prefix`. | +| **String** | `end_with(suffix)` | String ends with `suffix`. | +| **String** | `contain_string(substring)` | String contains `substring`. | +| **String** | `match_regex(pattern)` | String matches a regular expression (`pattern`) anywhere within it. | +| **Snapshot** | `match_snapshot(path)` | Compares a `String` to a snapshot file (creates it on first run). | +| **Snapshot** | `match_snapshot_inspect(path)` | Snapshot testing for any value via `string.inspect` serialization. | + +### Writing custom matchers (the matcher pattern) + +Dream Test doesn’t require a special “custom matcher API.” Built-in and custom matchers follow the same simple pattern: + +- A matcher is a function that **takes a `MatchResult(a)`** and **returns a `MatchResult(b)`**. +- If the incoming result is already a failure, the matcher should **propagate it unchanged**. +- Otherwise, it inspects the value and returns either `MatchOk(value)` or `MatchFailed(AssertionFailure(...))`. 
+ +Here’s a minimal custom matcher that checks “even number”: + +```gleam +import dream_test/types.{ + AssertionFailure, CustomMatcherFailure, MatchFailed, MatchOk, +} +import gleam/int +import gleam/option.{Some} + +pub fn be_even(result) { + case result { + // If already failed, propagate the failure + MatchFailed(failure) -> MatchFailed(failure) + // Otherwise, check our condition + MatchOk(value) -> check_even(value) + } +} + +fn check_even(value) { + case value % 2 == 0 { + True -> MatchOk(value) + False -> + MatchFailed(AssertionFailure( + operator: "be_even", + message: "", + payload: Some(CustomMatcherFailure( + actual: int.to_string(value), + description: "expected an even number", + )), + )) + } +} +``` + +This example uses a structured `payload` (`CustomMatcherFailure`) so reporters can display richer diagnostics without forcing you to bake everything into a string message. + +🧪 [Tested source](../examples/snippets/test/snippets/matchers/custom_matchers.gleam) + +### Why matchers unwrap values + +The “unwrap then assert” flow is one of the biggest quality-of-life wins of the pipeline approach. + +Instead of: + +- Pattern matching in every test +- Copy/pasting error handling +- Producing unclear failures (“expected Ok(_) but got Error(_)”) without context + +…you can write the story you mean: “this should be Ok, and the value should equal X.” + +### Explicit success/failure (when branching is unavoidable) + +Sometimes you need a conditional check that isn’t a good fit for the normal matcher pipeline. +Use `succeed()` and `fail_with("...")` to keep the return type consistent. 
+ +```gleam +import dream_test/matchers.{fail_with, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import snippets.{divide} + +pub fn tests() { + describe("Explicit failures", [ + it("succeeds explicitly when division works", fn() { + case divide(10, 2) { + Ok(_) -> Ok(succeed()) + Error(_) -> Ok(fail_with("Should have succeeded")) + } + }), + it("fails explicitly when expecting an error", fn() { + case divide(10, 0) { + Ok(_) -> Ok(fail_with("Should have returned an error")) + Error(_) -> Ok(succeed()) + } + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/unit/explicit_failures.gleam) + +### Common pitfalls (and how to avoid them) + +- **Forgetting `or_fail_with(...)`**: without it, failures tend to be harder to interpret in CI logs. Treat it like part of the assertion, not an optional extra. +- **Asserting too much in one chain**: long chains can hide which step mattered. Split into smaller checks when it improves clarity. +- **Using snapshots where equality is clearer**: if a value is tiny, prefer `be_equal(...)` over snapshot matchers (see the snapshot chapter for the tradeoff). + +### What's Next? + +- Go back to [Context-aware unit tests](04-context-aware-tests.md) +- Go back to [Documentation README](README.md) +- Continue to [Lifecycle hooks](06-lifecycle-hooks.md) diff --git a/documentation/06-lifecycle-hooks.md b/documentation/06-lifecycle-hooks.md new file mode 100644 index 0000000..6f7bd6c --- /dev/null +++ b/documentation/06-lifecycle-hooks.md @@ -0,0 +1,302 @@ +## Lifecycle hooks (`before_all`, `before_each`, `after_each`, `after_all`) + +Hooks are a power tool: they remove repetition, but they can also hide the story of a test if you overuse them. 
+
+The goal of Dream Test’s hook design is to keep hooks **predictable and debuggable**, especially under parallel execution.
+
+### Mental model
+
+Hooks are part of the nested test structure that the runner executes around tests:
+
+- Setup flows **enclosing scope → nested scope**
+- Teardown flows **nested scope → enclosing scope**
+- If a setup hook fails, Dream Test fails the affected tests **without running the test body**
+
+Hooks let you run setup/teardown logic around tests while keeping the test bodies focused on behavior.
+
+#### Example: hook order (enclosing → nested, then nested → enclosing)
+
+This example prints the execution order so you can see the flow directly:
+
+```gleam
+import dream_test/matchers.{succeed}
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner
+import dream_test/unit.{after_each, before_each, describe, group, it}
+import gleam/io
+
+pub fn tests() {
+  describe("Enclosing", [
+    before_each(fn() {
+      io.println("1. enclosing before_each")
+      Ok(Nil)
+    }),
+    after_each(fn() {
+      io.println("4. enclosing after_each")
+      Ok(Nil)
+    }),
+    group("Nested", [
+      before_each(fn() {
+        io.println("2. nested before_each")
+        Ok(Nil)
+      }),
+      after_each(fn() {
+        io.println("3. nested after_each")
+        Ok(Nil)
+      }),
+      it("test body runs here", fn() {
+        io.println("(test)")
+        Ok(succeed())
+      }),
+    ]),
+  ])
+}
+
+pub fn main() {
+  runner.new([tests()])
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.exit_on_failure()
+  |> runner.run()
+}
+```
+
+Expected print order:
+
+1. enclosing before_each
+2. nested before_each
+   (test)
+3. nested after_each
+4.
enclosing after_each
+
+#### Diagram: enclosing scope vs nested scope
+
+```mermaid
+sequenceDiagram
+    participant Enclosing as Enclosing scope (surrounding describe/group)
+    participant Nested as Nested scope (nested group)
+    participant Test as Test body
+
+    Note over Enclosing,Test: Setup (before_each) runs enclosing → nested
+    Enclosing->>Enclosing: before_each
+    Nested->>Nested: before_each
+    Test->>Test: run
+
+    Note over Enclosing,Test: Teardown (after_each) runs nested → enclosing
+    Nested->>Nested: after_each
+    Enclosing->>Enclosing: after_each
+```
+
+#### Example: setup failure skips the test body
+
+If a `before_each` hook returns `Error("...")`, the test body does not run:
+
+```gleam
+import dream_test/matchers.{succeed}
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner
+import dream_test/unit.{before_each, describe, it}
+import gleam/io
+
+pub fn tests() {
+  describe("Setup failures", [
+    before_each(fn() { Error("could not connect to database") }),
+    it("will not run", fn() {
+      // This won't execute.
+      io.println("nope")
+      Ok(succeed())
+    }),
+  ])
+}
+
+pub fn main() {
+  runner.new([tests()])
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.exit_on_failure()
+  |> runner.run()
+}
+```
+
+### When to use hooks
+
+- **Use hooks** for repetitive setup/cleanup (opening DB connections, starting servers, creating temp directories).
+- **Avoid hooks** when they hide important context. Prefer explicit setup in the test body for small cases.
+
+Two practical rules of thumb:
+
+- If the setup is essential to understanding the assertion, consider keeping it in the test body.
+- If the setup is infrastructure (“start server”, “create temp directory”), hooks usually make things clearer.
+ +### The four hooks + +- `before_all`: runs once before any tests in the group +- `before_each`: runs before each test in the group +- `after_each`: runs after each test in the group (even if the test fails) +- `after_all`: runs once after all tests in the group + +### Basic lifecycle example + +```gleam +import dream_test/matchers.{be_empty, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{ + after_all, after_each, before_all, before_each, describe, it, +} + +pub fn tests() { + describe("Database tests", [ + before_all(fn() { + // Start database once for all tests + start_database() + }), + before_each(fn() { + // Begin transaction before each test + begin_transaction() + }), + it("creates a record", fn() { + [] + |> should + |> be_empty() + |> or_fail_with("Placeholder test") + }), + it("queries records", fn() { + [] + |> should + |> be_empty() + |> or_fail_with("Placeholder test") + }), + after_each(fn() { + // Rollback transaction after each test + rollback_transaction() + }), + after_all(fn() { + // Stop database after all tests + stop_database() + }), + ]) +} + +fn start_database() { + Ok(Nil) +} + +fn stop_database() { + Ok(Nil) +} + +fn begin_transaction() { + Ok(Nil) +} + +fn rollback_transaction() { + Ok(Nil) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/hooks/lifecycle_hooks.gleam) + +### Hook inheritance (nested groups) + +Nested groups inherit hooks. Setup runs **enclosing → nested**, teardown runs **nested → enclosing**. 
+
+```gleam
+import dream_test/matchers.{succeed}
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner
+import dream_test/unit.{after_each, before_each, describe, group, it}
+import gleam/io
+
+pub fn tests() {
+  describe("Outer", [
+    before_each(fn() {
+      io.println("1. outer setup")
+      Ok(Nil)
+    }),
+    after_each(fn() {
+      io.println("4. outer teardown")
+      Ok(Nil)
+    }),
+    group("Inner", [
+      before_each(fn() {
+        io.println("2. inner setup")
+        Ok(Nil)
+      }),
+      after_each(fn() {
+        io.println("3. inner teardown")
+        Ok(Nil)
+      }),
+      it("test", fn() {
+        io.println("(test)")
+        Ok(succeed())
+      }),
+    ]),
+  ])
+}
+
+pub fn main() {
+  runner.new([tests()])
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.exit_on_failure()
+  |> runner.run()
+}
+```
+
+🧪 [Tested source](../examples/snippets/test/snippets/hooks/hook_inheritance.gleam)
+
+### Hook failure behavior (important for reliability)
+
+If a hook fails, Dream Test records that failure and fails the affected tests.
+
+```gleam
+import dream_test/matchers.{succeed}
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner
+import dream_test/unit.{before_all, describe, it}
+
+pub fn tests() {
+  describe("Handles failures", [
+    before_all(fn() { Error("Database connection failed") }),
+    // If before_all fails, these tests do not run, and they are reported as failed.
+    it("test1", fn() { Ok(succeed()) }),
+    it("test2", fn() { Ok(succeed()) }),
+  ])
+}
+
+pub fn main() {
+  runner.new([tests()])
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.exit_on_failure()
+  |> runner.run()
+}
+```
+
+🧪 [Tested source](../examples/snippets/test/snippets/hooks/hook_failure.gleam)
+
+### Hooks and parallelism (the source of most flaky tests)
+
+Dream Test runs tests in parallel by default (configurable).
Hooks don’t change that: they run _around_ tests, but they don’t automatically serialize tests that share resources. + +If your hooks touch shared external state (ports, filesystem paths, database schemas), you have two options: + +- Make the resource usage isolated (unique temp dirs, unique ports, per-test DB schemas), or +- Run sequentially by setting the runner’s concurrency to 1, or run only the shared-resource suite(s) sequentially using `runner.add_suites_with_config` (see the runner chapter). + +### What's Next? + +- Go back to [Assertions & matchers](05-assertions-and-matchers.md) +- Go back to [Documentation README](README.md) +- Continue to [Runner & execution model](07-runner-and-execution.md) diff --git a/documentation/07-runner-and-execution.md b/documentation/07-runner-and-execution.md new file mode 100644 index 0000000..87b41f4 --- /dev/null +++ b/documentation/07-runner-and-execution.md @@ -0,0 +1,179 @@ +## Runner & execution model + +This chapter is about turning “a suite” into “a reliable test run.” + +Suites describe behavior; the runner decides execution policy. The runner exists so you can make those policies explicit instead of relying on defaults you can’t see. + +### Mental model + +- You control **how fast** tests run with `max_concurrency`. +- You control **how long** tests may run with `default_timeout_ms`. +- You control **CI behavior** with `exit_on_failure`. + +Dream Test is **suite-first**: + +- You define suites with `dream_test/unit` or `dream_test/unit_context` +- You run them with `dream_test/runner` + +(Under the hood: the runner uses the parallel executor, but most users never need to call it directly.) + +### Why Dream Test is explicit here + +Most testing pain shows up at the runner layer: + +- Flakiness due to shared resources + parallelism +- Hung tests that stall CI +- Output that is hard to interpret under concurrency + +Dream Test’s runner makes those constraints visible and configurable. 
+ +### Configure parallelism + timeouts + +Use `max_concurrency` and `default_timeout_ms` to tune execution: + +- **Higher concurrency** speeds up independent tests. +- **Lower concurrency** is safer for tests that share external resources (DBs, ports, filesystem paths). + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Runner config demo", [ + it("runs with custom config", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("Math works") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.max_concurrency(8) + |> runner.default_timeout_ms(10_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/runner/runner_config.gleam) + +### Sequential execution (when shared resources matter) + +When tests share external state, you often want `max_concurrency(1)` to avoid flakiness. 
+ +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Sequential tests", [ + it("first test", fn() { + // When tests share external resources, run them sequentially + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("Math works") + }), + it("second test", fn() { + 2 + 2 + |> should + |> be_equal(4) + |> or_fail_with("Math still works") + }), + ]) +} + +pub fn main() { + // Sequential execution for tests with shared state + runner.new([tests()]) + |> runner.max_concurrency(1) + |> runner.default_timeout_ms(30_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/runner/sequential_execution.gleam) + +### Mixed execution policies (per-suite concurrency/timeout) + +If only *some* suites share external state (for example: database suites) you don’t need two separate runners. +Instead, run everything in one runner and apply an execution config override to the suites that need it. + +```gleam +import dream_test/parallel +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner + +pub fn main() { + let db_config = + parallel.ParallelConfig(max_concurrency: 1, default_timeout_ms: 60_000) + + runner.new([]) + |> runner.add_suites([unit_tests()]) + |> runner.add_suites_with_config(db_config, [db_tests()]) + |> runner.max_concurrency(8) + |> runner.default_timeout_ms(10_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/runner/suite_specific_config.gleam) + +### Choosing a concurrency number (practical guidance) + +- Start with the default. 
+- If you see flakes involving ports/files/DB state, either isolate those resources per test or set `max_concurrency(1)` for that run. +- If you have a large suite of pure unit tests (no external state), increasing concurrency often speeds up feedback noticeably. + +### Advanced: running the executor directly + +Most users should not call `dream_test/parallel` directly. It’s public so advanced tooling can embed the executor. + +```gleam +import dream_test/matchers.{have_length, or_fail_with, should, succeed} +import dream_test/parallel +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Parallel executor", [ + it("can run a suite and return a list of results", fn() { + let suite = + describe("Suite", [ + it("a", fn() { Ok(succeed()) }), + it("b", fn() { Ok(succeed()) }), + ]) + + parallel.run_root_parallel(parallel.default_config(), suite) + |> should + |> have_length(2) + |> or_fail_with("expected two results") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/utils/parallel_direct.gleam) + +### What's Next? + +- Go back to [Lifecycle hooks](06-lifecycle-hooks.md) +- Go back to [Documentation README](README.md) +- Continue to [Reporters](08-reporters.md) diff --git a/documentation/08-reporters.md b/documentation/08-reporters.md new file mode 100644 index 0000000..66ec5c1 --- /dev/null +++ b/documentation/08-reporters.md @@ -0,0 +1,356 @@ +## Reporters (BDD, JSON, Progress, Gherkin) + +Reporters are the bridge between “test results” and “what someone sees.” + +This chapter helps you choose an output style for humans (local dev), for machines (CI/tooling), and for scenarios where you want both. + +### Mental model + +Dream Test reporting is split into two phases: + +- **Progress reporter (during the run)**: reacts to events in completion order and renders live progress. +- **Results reporters (end of run)**: print whole report blocks from the final, traversal-ordered results. 
+ +This keeps output readable under parallel execution: + +- Progress stays responsive and keeps CI logs alive. +- Final reports are deterministic and easy to scan/tail. + +### Recommended default: progress + BDD + +This is the standard “human” output: live progress while tests run, then a full BDD report (with failures repeated near the end) and a summary. + +```gleam +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("BDD reporter", [ + it("passes", fn() { Ok(succeed()) }), + it("also passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/reporters/bdd_reporter.gleam) + +### JSON results reporter (end of run) + +Use JSON output for CI/CD integration and tooling (parsing, dashboards, artifact uploads). + +```gleam +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("JSON Reporter", [ + it("outputs JSON format", fn() { + // `json.new()` prints machine-readable JSON at the end of the run. + Ok(succeed()) + }), + it("includes test metadata", fn() { + // JSON output includes name, full_name, status, duration, tags + Ok(succeed()) + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([json.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/reporters/json_reporter.gleam) + +### Progress reporter (during the run) + +Use progress output when you want compact logs, especially for large suites. 
+ +```gleam +import dream_test/matchers.{succeed} +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Progress reporter", [ + it("passes", fn() { Ok(succeed()) }), + it("also passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/reporters/progress_reporter.gleam) + +### Post-run formatting (render into a string) + +Post-run formatting is useful when you want to: + +- Save reports to disk +- Embed results into a larger tool +- Perform extra processing before output + +```gleam +import dream_test/matchers.{ + contain_string, or_fail_with, should, succeed, +} +import dream_test/reporters/bdd +import dream_test/runner +import dream_test/unit.{describe, it} + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn tests() { + describe("BDD formatting", [ + it("format returns a report string", fn() { + let results = runner.new([example_suite()]) |> runner.run() + let report = bdd.format(results) + + report + |> should + |> contain_string("Example Suite") + |> or_fail_with("Expected formatted report to include the suite name") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/reporters/bdd_formatting.gleam) + +### Custom reporters + +Dream Test currently supports **custom reporting** in two practical ways: + +- **Custom output routing**: decide where runner output goes (stdout, stderr, file, in-memory buffer). +- **Custom post-run reporters**: write your own formatter over `List(TestResult)` and print it after `runner.run()`. 
+ +Note: the runner’s built-in “attach” points are intentionally small today: + +- `runner.progress_reporter(...)` currently accepts only `progress.ProgressReporter` +- `runner.results_reporters([...])` currently accepts the built-in `bdd`/`json` results reporters + +If you want “plug in an arbitrary reporter module” wired into the runner, you can still do it by building your own driver around `runner.run()` (post-run), or by implementing a custom executor/event loop (advanced). + +#### Custom output routing (capture output) + +Use `runner.output(...)` to route any reporter output into your own sinks. +This is useful for snapshots, embedding Dream Test in other tools, or writing reports to files. + +```gleam +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/erlang/process as beam_process +import gleam/list +import gleam/otp/actor +import gleam/string + +pub type OutMsg { + Write(String) + GetAll(beam_process.Subject(List(String))) +} + +fn handle_out( + state: List(String), + msg: OutMsg, +) -> actor.Next(List(String), OutMsg) { + case msg { + Write(line) -> actor.continue([line, ..state]) + GetAll(reply) -> { + beam_process.send(reply, state) + actor.continue(state) + } + } +} + +fn start_out() -> beam_process.Subject(OutMsg) { + let assert Ok(started) = + actor.new([]) + |> actor.on_message(handle_out) + |> actor.start + started.data +} + +fn read_out(out: beam_process.Subject(OutMsg)) -> String { + actor.call(out, waiting: 1000, sending: GetAll) + |> list.reverse + |> string.concat +} + +pub fn main() { + let out = start_out() + let write = fn(s: String) { beam_process.send(out, Write(s)) } + let output = runner.Output(out: write, error: write) + + let _results = + runner.new([example_suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new() |> bdd.summary_only()]) + |> 
runner.output(output) + |> runner.run() + + let captured = read_out(out) + // Now you can snapshot/parse/write `captured` however you want. + let _ = captured + Nil +} + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} +``` + +#### Custom post-run reporter (format `List(TestResult)`) + +If you want a new report format (JUnit XML, GitHub annotations, your own table layout), +write a pure formatter over `List(TestResult)` and run it after `runner.run()`. + +```gleam +import dream_test/types.{ + type Status, type TestResult, Failed, Passed, Pending, SetupFailed, Skipped, + TimedOut, +} +import gleam/int +import gleam/list +import gleam/string + +pub fn render_my_report(results: List(TestResult)) -> String { + let total = list.length(results) + let failed = count(results, Failed) + let timed_out = count(results, TimedOut) + let setup_failed = count(results, SetupFailed) + let skipped = count(results, Skipped) + let pending = count(results, Pending) + let passed = total - failed - timed_out - setup_failed - skipped - pending + + string.concat([ + "MyReport: ", + int.to_string(total), + " total; ", + int.to_string(passed), + " passed; ", + int.to_string(failed + timed_out + setup_failed), + " failed\n", + ]) +} + +fn count(results: List(TestResult), wanted: Status) -> Int { + count_loop(results, wanted, 0) +} + +fn count_loop(results: List(TestResult), wanted: Status, n: Int) -> Int { + case results { + [] -> n + [r, ..rest] -> + case r.status == wanted { + True -> count_loop(rest, wanted, n + 1) + False -> count_loop(rest, wanted, n) + } + } +} +``` + +Then call it from your runner `main()`: + +```gleam +import dream_test/runner +import gleam/io + +pub fn main() { + let results = runner.new([tests()]) |> runner.run() + io.print(render_my_report(results)) +} +``` + +#### Driving the progress reporter manually (advanced) + +If you’re building a custom tool that already receives `ReporterEvent` values, you can reuse +the 
built-in progress renderer: + +- `progress.handle_event(progress.new(), event)` returns `Option(String)` +- You decide where to write it, and when (or whether) to print it + +This is intentionally **not** the common path; most users should attach progress via +`runner.progress_reporter(progress.new())`. + +### Gherkin reporter (post-run, Cucumber-style) + +If you’re using `dream_test/gherkin`, you can render results in Gherkin-friendly formatting. + +```gleam +import dream_test/matchers.{succeed} +import dream_test/gherkin/feature.{feature, given, scenario, then} +import dream_test/gherkin/steps.{step} +import dream_test/reporters/gherkin as gherkin_reporter +import dream_test/runner +import gleam/io + +fn step_ok(_context) { + Ok(succeed()) +} + +pub fn tests() { + let steps = steps.new() |> step("everything is fine", step_ok) + + feature("Gherkin Reporting", steps, [ + scenario("A passing scenario", [ + given("everything is fine"), + then("everything is fine"), + ]), + ]) +} + +pub fn main() { + let results = runner.new([tests()]) |> runner.run() + let _ = gherkin_reporter.report(results, io.print) + Nil +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/reporters/gherkin_reporter.gleam) + +### Choosing a reporter (a quick heuristic) + +- **Local dev**: start with BDD output. +- **Big suites / noisy logs**: progress output. +- **Tooling / CI integration**: JSON output (and/or post-run formatting to write files). +- **Behavior specs**: use the Gherkin reporter when your suites are authored via `dream_test/gherkin`. + +### What's Next? 
+ +- Go back to [Runner & execution model](07-runner-and-execution.md) +- Go back to [Documentation README](README.md) +- Continue to [Snapshot testing](09-snapshot-testing.md) diff --git a/documentation/09-snapshot-testing.md b/documentation/09-snapshot-testing.md new file mode 100644 index 0000000..2763fa3 --- /dev/null +++ b/documentation/09-snapshot-testing.md @@ -0,0 +1,134 @@ +## Snapshot testing + +Snapshot tests are about turning “a big output blob” into a stable contract. + +They’re great when the output is too large or too structural to assert on directly, but they can also become noise if you snapshot things that change frequently. + +### Mental model + +A snapshot test compares current output against a stored “golden file”: + +- If the snapshot file is missing, Dream Test creates it. +- If it exists, Dream Test compares content and fails on a diff. + +Snapshot testing is for “this output should stay stable” assertions: + +- HTML rendering +- JSON output +- formatted reports +- pretty-printed data structures + +### When snapshot tests are a good fit + +- You want confidence that output didn’t change unexpectedly. +- The output is large/structured enough that writing a manual assertion would be noisy. + +The real “why” is readability: a snapshot test often makes the intent clearer than a dozen micro-assertions. + +### When to avoid snapshots + +- The output includes inherently unstable data (timestamps, random IDs) unless you normalize it. +- The output is so small that a direct `equal(...)` is clearer. + +If your snapshot fails every day for “expected reasons,” it’s no longer buying you safety—it’s training you to ignore diffs. 
+ +### String snapshots + `inspect` snapshots + +```gleam +import dream_test/matchers.{ + be_equal, match_snapshot, match_snapshot_inspect, or_fail_with, should, +} +import dream_test/matchers/snapshot +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, group, it} +import gleam/int +import gleam/result +import gleam/string + +// Example: A function that renders a user profile as HTML +fn render_profile(name, age) { + string.concat([ + "
<div>\n",
+    "<h1>",
+    name,
+    "</h1>\n",
+    "<p>Age: ",
+    int.to_string(age),
+    "</p>\n",
+    "</div>
", + ]) +} + +// Example: A function that builds a configuration record +pub type Config { + Config(host: String, port: Int, debug: Bool) +} + +fn build_config() { + Config(host: "localhost", port: 8080, debug: True) +} + +pub fn tests() { + describe("Snapshot Testing", [ + group("match_snapshot", [ + it("renders user profile", fn() { + render_profile("Alice", 30) + |> should + |> match_snapshot("./test/snapshots/user_profile.snap") + |> or_fail_with("Profile should match snapshot") + }), + ]), + group("match_snapshot_inspect", [ + it("builds config correctly", fn() { + build_config() + |> should + |> match_snapshot_inspect("./test/snapshots/config.snap") + |> or_fail_with("Config should match snapshot") + }), + ]), + group("clearing snapshots", [ + it("can clear a single snapshot", fn() { + // Create a temporary snapshot + use _ <- result.try( + "temp content" + |> should + |> match_snapshot("./test/snapshots/temp.snap") + |> or_fail_with("Should create temp snapshot"), + ) + + // Clear it + let result = snapshot.clear_snapshot("./test/snapshots/temp.snap") + + result + |> should + |> be_equal(Ok(Nil)) + |> or_fail_with("Should successfully clear snapshot") + }), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/matchers/snapshots.gleam) + +### Practical workflow + +- **First run**: the snapshot file doesn’t exist → Dream Test creates it. +- **Review**: treat the snapshot like code. Make sure it matches what you intend to guarantee. +- **Future changes**: when it fails, decide whether the change is a regression (fix code) or an intentional update (update snapshot). + +### What's Next? 
+ +- Go back to [Reporters](08-reporters.md) +- Go back to [Documentation README](README.md) +- Continue to [Gherkin / BDD testing](10-gherkin-bdd.md) diff --git a/documentation/10-gherkin-bdd.md b/documentation/10-gherkin-bdd.md new file mode 100644 index 0000000..840b7bf --- /dev/null +++ b/documentation/10-gherkin-bdd.md @@ -0,0 +1,373 @@ +## Gherkin / BDD testing + +### What is Gherkin? + +**Gherkin** is a lightweight, plain-text format for writing **behavior-driven development (BDD)** specs. It’s commonly associated with tools like Cucumber, and it’s designed to be readable by both engineers and non-engineers. + +At a high level, you describe behavior using a small vocabulary: + +- **Feature**: a capability you’re describing (a suite of scenarios) +- **Scenario**: one concrete example (a test) +- **Background**: steps that should run before every scenario (shared setup) +- **Given / When / Then**: a convention for structuring steps: + - **Given**: starting state / setup + - **When**: the action + - **Then**: the expected outcome + +In Dream Test, you write Gherkin either as standard `.feature` files or directly in Gleam, and you implement behavior by mapping step text to handler functions (a “step registry”). + +Unit tests are great for “input → output.” Gherkin is for “a sequence of behavior over time.” + +This chapter explains Dream Test’s Gherkin layer as an engineering tool: + +- It’s for scenarios with shared per-scenario state (“the world”) +- It’s for readable specs you can share with product/QA +- It’s for reusing step definitions across many scenarios + +### Mental model + +- A **scenario** is a test. +- Each scenario runs with its own `StepContext`: + - `context.world` is per-scenario state (isolated) + - `context.captures` holds placeholder captures from the step text +- A **step registry** maps text patterns to handler functions. + +Dream Test includes a Gherkin DSL that lets you write scenarios with **Given/When/Then** structure. 
+ +### When to use Gherkin + +- You want tests that are readable by non-engineers (product, QA, support). +- You want to model behavior as scenarios with a shared “world” state. +- You want to reuse step definitions across many scenarios. + +If your tests are mostly “function input → output”, the unit DSL (`describe` / `it`) is usually simpler. + +### The core pieces + +- **Feature**: a suite of scenarios +- **Scenario**: a list of steps (Given/When/Then/And/But) +- **Step registry**: maps step patterns to handlers +- **World**: per-scenario state (isolated between scenarios) +- **Captures**: placeholder values parsed from step text + +### Step handlers and `StepContext` (the per-scenario context) + +Each step handler receives a `StepContext` record. The two fields you use most often are: + +- `context.world`: a per-scenario key/value store (isolated between scenarios) +- `context.captures`: the placeholder captures extracted from the step text + +Important Gleam detail: when you access record fields like `context.world`, Gleam needs to know the record type. +That’s why the examples annotate `context: StepContext` (it’s the minimal type hint needed for record field access). 
+ +### Inline Gherkin (the “hero” example) + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/gherkin/feature.{feature, given, scenario, then, when} +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/result + +fn step_have_items(context: StepContext) { + let count = get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "cart", count) + Ok(succeed()) +} + +fn step_add_items(context: StepContext) { + let current = get_or(context.world, "cart", 0) + let to_add = get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "cart", current + to_add) + Ok(succeed()) +} + +fn step_should_have(context: StepContext) { + let expected = get_int(context.captures, 0) |> result.unwrap(0) + get_or(context.world, "cart", 0) + |> should + |> be_equal(expected) + |> or_fail_with("Cart count mismatch") +} + +pub fn tests() { + let steps = + steps.new() + |> step("I have {int} items in my cart", step_have_items) + |> step("I add {int} more items", step_add_items) + |> step("I should have {int} items total", step_should_have) + + feature("Shopping Cart", steps, [ + scenario("Adding items to cart", [ + given("I have 3 items in my cart"), + when("I add 2 more items"), + then("I should have 5 items total"), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/gherkin/gherkin_hero.gleam) + +What to notice in this example: + +- The step registry is built once and shared by scenarios. +- `context.world` is per-scenario state, so scenarios don’t leak into each other. 
+- `context.captures` turns step text into typed values (via helpers like `get_int`). + +### Placeholders (`{int}`, `{float}`, `{string}`, `{word}`) + +Placeholders let you capture values from the step text into `context.captures`. +Typed helpers like `get_int` parse the capture into the right type. + +```gleam +import dream_test/matchers.{succeed} +import dream_test/gherkin/feature.{feature, given, scenario, then} +import dream_test/gherkin/steps.{ + type StepContext, get_float, get_int, get_string, get_word, step, +} +import dream_test/gherkin/world.{put} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/result + +// {int} captures integers +fn step_int(context: StepContext) { + let value = get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "int", value) + Ok(succeed()) +} + +// {float} captures decimals (works with $ prefix too) +fn step_float(context: StepContext) { + let value = get_float(context.captures, 0) |> result.unwrap(0.0) + put(context.world, "float", value) + Ok(succeed()) +} + +// {string} captures quoted strings +fn step_string(context: StepContext) { + let value = get_string(context.captures, 0) |> result.unwrap("") + put(context.world, "string", value) + Ok(succeed()) +} + +// {word} captures a single unquoted word +fn step_word(context: StepContext) { + let value = get_word(context.captures, 0) |> result.unwrap("") + put(context.world, "word", value) + Ok(succeed()) +} + +fn step_pass(_context) { + Ok(succeed()) +} + +pub fn tests() { + let steps = + steps.new() + |> step("I have {int} items", step_int) + |> step("the price is ${float}", step_float) + |> step("the message is {string}", step_string) + |> step("the user is {word}", step_word) + |> step("everything works", step_pass) + + feature("Placeholder Types", steps, [ + scenario("Using different placeholders", [ + given("I have 42 items"), + given("the price is $19.99"), + given("the message is \"hello world\""), 
+ given("the user is alice"), + then("everything works"), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/gherkin/gherkin_placeholders.gleam) + +### `.feature` files (authoring + parsing) + +If you want tests authored in plain text (often by QA/product, or copied from tickets), use real `.feature` files. + +#### Example `.feature` file + +```gherkin +@shopping +Feature: Shopping Cart + As a customer I want to add items to my cart + + Background: + Given the server is running + + @smoke + Scenario: Adding items + Given the cart is empty + When I add 3 items + Then the cart should have 3 items + + Scenario: Adding multiple times + Given the cart is empty + When I add 2 items + And I add 3 items + Then the cart should have 5 items +``` + +🧪 [Tested source](../examples/snippets/test/cart.feature) + +#### Loading a `.feature` file and converting to a suite + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} +import dream_test/gherkin/parser +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/result + +fn step_empty_cart(context: StepContext) { + put(context.world, "cart", 0) + Ok(succeed()) +} + +fn step_server_running(context: StepContext) { + put(context.world, "server_running", True) + Ok(succeed()) +} + +fn step_add_items(context: StepContext) { + let current = get_or(context.world, "cart", 0) + let to_add = get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "cart", current + to_add) + Ok(succeed()) +} + +fn step_verify_count(context: StepContext) { + let expected = 
get_int(context.captures, 0) |> result.unwrap(0) + get_or(context.world, "cart", 0) + |> should + |> be_equal(expected) + |> or_fail_with("Cart count mismatch") +} + +pub fn tests() { + // Define step handlers + let steps = + steps.new() + |> step("the server is running", step_server_running) + |> step("the cart is empty", step_empty_cart) + |> step("I add {int} items", step_add_items) + |> step("the cart should have {int} items", step_verify_count) + + // Parse the .feature file + let assert Ok(feature) = parser.parse_file("test/cart.feature") + + // Convert to TestSuite and run + let config = FeatureConfig(feature: feature, step_registry: steps) + to_test_suite(config) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/gherkin/gherkin_file.gleam) + +### Feature discovery (load `.feature` files) + +Use discovery when you want real `.feature` files (e.g. written by QA or copied into tickets). 
+ +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/gherkin/discover +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/result + +fn step_empty_cart(context: StepContext) { + put(context.world, "cart", 0) + Ok(succeed()) +} + +fn step_server_running(context: StepContext) { + put(context.world, "server_running", True) + Ok(succeed()) +} + +fn step_add_items(context: StepContext) { + let current = get_or(context.world, "cart", 0) + let to_add = get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "cart", current + to_add) + Ok(succeed()) +} + +fn step_verify_count(context: StepContext) { + let expected = get_int(context.captures, 0) |> result.unwrap(0) + get_or(context.world, "cart", 0) + |> should + |> be_equal(expected) + |> or_fail_with("Cart count mismatch") +} + +pub fn tests() { + // Define step handlers + let steps = + steps.new() + |> step("the server is running", step_server_running) + |> step("the cart is empty", step_empty_cart) + |> step("I add {int} items", step_add_items) + |> step("the cart should have {int} items", step_verify_count) + + // Discover and load all .feature files + discover.features("test/*.feature") + |> discover.with_registry(steps) + |> discover.to_suite("cart_features") +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/gherkin/gherkin_discover.gleam) + +### What's Next? 
+ +- Go back to [Snapshot testing](09-snapshot-testing.md) +- Go back to [Documentation README](README.md) +- Continue to [Utilities](11-utilities.md) diff --git a/documentation/11-utilities.md b/documentation/11-utilities.md new file mode 100644 index 0000000..43a4ace --- /dev/null +++ b/documentation/11-utilities.md @@ -0,0 +1,221 @@ +## Utilities (file, process, timing, sandbox) + +These modules exist because real test suites always accumulate a little infrastructure: temp files, unique ports, timing, and “run this safely.” + +Rather than forcing every project to reinvent these helpers slightly differently (and slightly incorrectly), Dream Test provides small, intentionally-minimal building blocks. + +### Mental model + +These are small helper modules that make tests more reliable and less repetitive: + +- `file`: predictable IO + structured errors (great for temp files/snapshots) +- `process`: small helpers for test isolation (counters, unique ports) +- `timing`: monotonic time + formatting +- `sandbox`: run code in an isolated process with timeout/crash reporting + +Dream Test ships a few small helper modules that support testing workflows. +They’re public because they’re useful in real test suites, but they’re intentionally minimal. + +### `dream_test/file` (safe filesystem helpers) + +Use these helpers when you want predictable, structured error handling around file IO (snapshots, temp files, fixtures). 
+ +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/file.{NotFound, delete, error_to_string, read, write} +import dream_test/process.{unique_port} +import dream_test/unit.{describe, it} +import gleam/int + +fn tmp_path() { + "./test/tmp/file_helpers_" <> int.to_string(unique_port()) <> ".txt" +} + +pub fn tests() { + describe("File helpers", [ + it("write + read roundtrip", fn() { + let path = tmp_path() + let _ = write(path, "hello") + + read(path) + |> should + |> be_equal(Ok("hello")) + |> or_fail_with("expected to read back written content") + }), + + it("delete removes a file", fn() { + let path = tmp_path() + let _ = write(path, "hello") + let _ = delete(path) + + read(path) + |> should + |> be_equal(Error(NotFound(path))) + |> or_fail_with("expected deleted file to be NotFound") + }), + + it("error_to_string formats NotFound", fn() { + error_to_string(NotFound("/x")) + |> should + |> be_equal("File not found: /x") + |> or_fail_with("expected NotFound formatting") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/utils/file_helpers.gleam) + +### `dream_test/process` (small helpers for tests) + +These are practical testing helpers: a simple counter and a “unique port” generator for avoiding collisions. 
+ +```gleam +import dream_test/matchers.{be_between, be_equal, or_fail_with, should} +import dream_test/process.{get_count, increment, start_counter, unique_port} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Process helpers", [ + it("start_counter + increment + get_count work", fn() { + let counter = start_counter() + increment(counter) + increment(counter) + + get_count(counter) + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2") + }), + + it("unique_port returns a value in the safe range", fn() { + unique_port() + |> should + |> be_between(10_000, 60_000) + |> or_fail_with("expected unique_port to be within 10k..60k") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/utils/process_helpers.gleam) + +### `dream_test/timing` (durations + monotonic time) + +Use `timing.now_ms()` / `timing.now_us()` for monotonic timing, and format helpers for readable output. + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/timing +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Timing", [ + it("format_duration_ms scales milliseconds and seconds", fn() { + // Arrange & Act + let ms = timing.format_duration_ms(42) + + // Assert + ms + |> should + |> be_equal("42ms") + |> or_fail_with("expected 42ms") + }), + + it("format_duration_ms formats 1500ms as seconds", fn() { + timing.format_duration_ms(1500) + |> should + |> be_equal("1.5s") + |> or_fail_with("expected 1.5s") + }), + + it("format_duration_us formats sub-millisecond values", fn() { + timing.format_duration_us(500) + |> should + |> be_equal("0.5ms") + |> or_fail_with("expected 0.5ms") + }), + + it("now_ms is monotonic (non-decreasing)", fn() { + let t1 = timing.now_ms() + let t2 = timing.now_ms() + let ok = t2 >= t1 + + ok + |> should + |> be_equal(True) + |> or_fail_with("expected now_ms to be monotonic") + }), + ]) +} +``` + +🧪 [Tested 
source](../examples/snippets/test/snippets/utils/timing_helpers.gleam) + +### `dream_test/sandbox` (timeouts + crash isolation) + +This is the mechanism Dream Test uses to make test execution resilient: run code in an isolated process, detect timeouts, and catch crashes. + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/sandbox.{ + SandboxCompleted, SandboxConfig, SandboxCrashed, SandboxTimedOut, +} +import dream_test/unit.{describe, it} + +fn loop_forever() { + loop_forever() +} + +pub fn tests() { + describe("Sandboxing", [ + it("run_isolated returns SandboxCompleted(value) on success", fn() { + let config = SandboxConfig(timeout_ms: 100, show_crash_reports: False) + let result = sandbox.run_isolated(config, fn() { 123 }) + + result + |> should + |> be_equal(SandboxCompleted(123)) + |> or_fail_with("expected SandboxCompleted(123)") + }), + + it( + "run_isolated returns SandboxTimedOut when the function is too slow", + fn() { + let config = SandboxConfig(timeout_ms: 10, show_crash_reports: False) + let result = sandbox.run_isolated(config, loop_forever) + + result + |> should + |> be_equal(SandboxTimedOut) + |> or_fail_with("expected SandboxTimedOut") + }, + ), + + it("run_isolated returns SandboxCrashed when the function panics", fn() { + let config = SandboxConfig(timeout_ms: 100, show_crash_reports: False) + let result = sandbox.run_isolated(config, fn() { panic as "boom" }) + + let did_crash = case result { + SandboxCrashed(_) -> True + _ -> False + } + + did_crash + |> should + |> be_equal(True) + |> or_fail_with("expected SandboxCrashed(...)") + }), + ]) +} +``` + +🧪 [Tested source](../examples/snippets/test/snippets/utils/sandboxing.gleam) + +### What's Next? 
+ +- Go back to [Gherkin / BDD testing](10-gherkin-bdd.md) +- Go back to [Documentation README](README.md) +- Jump to the API reference on Hexdocs: [Dream Test on Hexdocs](https://hexdocs.pm/dream_test/) + + diff --git a/documentation/README.md b/documentation/README.md new file mode 100644 index 0000000..ca59a21 --- /dev/null +++ b/documentation/README.md @@ -0,0 +1,47 @@ +## Dream Test Documentation + +This folder is a **guided series** for engineers adopting Dream Test. It’s written as narrative documentation: each chapter explains the problem it solves, the mental model you need to use it safely, and the rationale behind the API shape. + +If you want a complete “everything in one place” reference, see [Dream Test on Hexdocs](https://hexdocs.pm/dream_test/). This series is intentionally more opinionated: it’s meant to help you develop good instincts and avoid the common failure modes (flaky tests, hidden setup, unreadable failures). + +### Target support + +Dream Test targets the **BEAM (Erlang)**. + +### The 30-second mental model + +Dream Test tries to keep three ideas explicit: + +- **Suites are data**: you build a nested set of suites/groups/tests (unit suites or Gherkin features) rather than relying on hidden global test discovery. +- **The runner is a policy decision**: your `main()` chooses output style, concurrency, timeouts, and CI behavior. +- **Assertions are pipelines**: you start from a value, run matchers, then end with a failure message you’ll want to read at 2am. + +### Why the docs read like this + +Testing frameworks often optimize for “look how short this example is.” Dream Test optimizes for “why did this fail, and what do I do next?” + +That’s why you’ll see the same patterns repeated: + +- **Explicit runner module** instead of magic auto-discovery everywhere (with an opt-in discovery helper when you want it). +- **Pipe-first assertions** instead of fluent builders or macros. 
+- **Process isolation + timeouts** so a single bad test can’t hang or crash the whole run. + +### Recommended reading order (the full story) + +1. [Installation](01-installation.md) — get to a green run, and understand why `main()` exists. +2. [Quick Start](02-quick-start.md) — the first “real” test, with two runner styles. +3. [Writing unit tests](03-writing-tests.md) — naming, structure, skipping, tags. +4. [Context-aware unit tests](04-context-aware-tests.md) — when “setup returns a value” and you need to pass it into tests. +5. [Assertions & matchers](05-assertions-and-matchers.md) — how and why the `should` pipeline works. +6. [Lifecycle hooks](06-lifecycle-hooks.md) — power tools, and how to keep them from hiding meaning. +7. [Runner & execution model](07-runner-and-execution.md) — concurrency, timeouts, CI, and reliability. +8. [Reporters](08-reporters.md) — humans vs machines, streaming vs post-run. +9. [Snapshot testing](09-snapshot-testing.md) — when snapshots make tests clearer (and when they make them worse). +10. [Gherkin / BDD](10-gherkin-bdd.md) — scenario testing with a world state and placeholder captures. +11. [Utilities](11-utilities.md) — small helpers that make tests less repetitive and more reliable. + +### If you only need one thing… + +- **I want to get a test running**: start at [Quick Start](02-quick-start.md). +- **My tests are flaky**: read [Runner & execution model](07-runner-and-execution.md) and [Lifecycle hooks](06-lifecycle-hooks.md). +- **I need better CI integration**: jump to [Reporters](08-reporters.md) (JSON) and [Runner & execution model](07-runner-and-execution.md) (exit codes). 
diff --git a/examples/cache_app/README.md b/examples/cache_app/README.md index 724fedf..2f78ae0 100644 --- a/examples/cache_app/README.md +++ b/examples/cache_app/README.md @@ -18,9 +18,9 @@ it("retrieves a stored value", fn() { // Assert cache_app.stop(cache) result - |> should() + |> should |> be_some() - |> equal("Alice") + |> be_equal("Alice") |> or_fail_with("get() should return the stored value") }) ``` @@ -29,25 +29,25 @@ Tests interact only with the public API — no internal state inspection. ## What This Demonstrates -| Feature | Where | -| ------- | ----- | -| Nested `describe`/`it` blocks | Throughout | -| `before_all` / `after_all` hooks | "with lifecycle hooks" section | -| `before_each` / `after_each` hooks | "with lifecycle hooks" section | -| Option matchers (`be_some`, `be_none`) | Basic operations | -| Result matchers (`be_ok`, `be_error`) | Convenience functions | -| Collection matchers (`contain`, `have_length`, `be_empty`) | Collection operations | -| Boolean matchers (`be_true`, `be_false`) | Convenience functions | -| Comparison matchers (`be_greater_than`, `be_at_least`) | Collection operations | -| Equality matchers (`equal`, `not_equal`) | Throughout | -| Assertion chaining (`be_ok() \|> equal()`) | Convenience functions | -| Suite mode (`to_test_suite` + `run_suite`) | main() | +| Feature | Where | +| ---------------------------------------------------------- | ------------------------------ | +| Nested `describe`/`it` blocks | Throughout | +| `before_all` / `after_all` hooks | "with lifecycle hooks" section | +| `before_each` / `after_each` hooks | "with lifecycle hooks" section | +| Option matchers (`be_some`, `be_none`) | Basic operations | +| Result matchers (`be_ok`, `be_error`) | Convenience functions | +| Collection matchers (`contain`, `have_length`, `be_empty`) | Collection operations | +| Boolean matchers (`be_true`, `be_false`) | Convenience functions | +| Comparison matchers (`be_greater_than`, `be_at_least`) | Collection 
operations | +| Equality matchers (`equal`, `not_equal`) | Throughout | +| Assertion chaining (`be_ok() \|> be_equal()`) | Convenience functions | +| Runner (suite-first) (`runner.new` → `runner.run`) | `cache_app_test.gleam` | ## Running the Tests ```bash cd examples/cache_app -gleam test +make test ``` ## The Cache App @@ -78,7 +78,6 @@ Key patterns to follow: 1. **Group related tests** in `describe` blocks 2. **Use lifecycle hooks** for shared setup/teardown -3. **Chain assertions** for readability (`be_ok() |> equal()`) -4. **Use suite mode** when you need `before_all`/`after_all` +3. **Chain assertions** for readability (`be_ok() |> be_equal()`) +4. **Use root hooks** when you need `before_all`/`after_all` 5. **Clean up resources** in tests (stop caches, close connections) - diff --git a/examples/cache_app/test/cache_app_test.gleam b/examples/cache_app/test/cache_app_test.gleam index 7a64fbf..0e7db90 100644 --- a/examples/cache_app/test/cache_app_test.gleam +++ b/examples/cache_app/test/cache_app_test.gleam @@ -8,486 +8,489 @@ //// Tests interact only with the public API — no internal state inspection. 
import cache_app -import dream_test/assertions/should.{ - be_empty, be_error, be_false, be_greater_than, be_none, be_ok, be_some, - be_true, contain, equal, have_length, or_fail_with, should, +import dream_test/matchers.{ + be_empty, be_equal, be_error, be_false, be_greater_than, be_none, be_ok, + be_some, be_true, contain, have_length, or_fail_with, should, } -import dream_test/reporter/bdd.{report} -import dream_test/runner.{exit_on_failure, run_suite} -import dream_test/types.{AssertionOk} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import dream_test/unit.{ - after_all, after_each, before_all, before_each, describe, it, to_test_suite, + after_all, after_each, before_all, before_each, describe, group, it, } import gleam/io import gleam/option.{Some} +import gleam/result // ============================================================================ // Test Suite // ============================================================================ -pub fn tests() { - describe("Cache", [ - // ------------------------------------------------------------------------- - // Basic Operations - // ------------------------------------------------------------------------- - describe("get and set", [ - it("retrieves a stored value", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "name", "Alice") - - // Act - let result = cache_app.get(cache, "name") - - // Assert - result - |> should() - |> be_some() - |> equal("Alice") - |> or_fail_with("get() should return the stored value") - }), +pub fn suites() { + [ + describe("Cache", [ + // ------------------------------------------------------------------------- + // Basic Operations + // ------------------------------------------------------------------------- + group("get and set", [ + it("retrieves a stored value", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "name", "Alice") - it("returns None for keys that were never set", 
fn() { - // Arrange - let cache = cache_app.start() + // Act + let result = cache_app.get(cache, "name") - // Act - let result = cache_app.get(cache, "nonexistent") + // Assert + result + |> should + |> be_some() + |> be_equal("Alice") + |> or_fail_with("get() should return the stored value") + }), - // Assert - result - |> should() - |> be_none() - |> or_fail_with("get() should return None for missing keys") - }), + it("returns None for keys that were never set", fn() { + // Arrange + let cache = cache_app.start() - it("overwrites previous values for the same key", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "version", "1.0") - - // Act - cache_app.set(cache, "version", "2.0") - let result = cache_app.get(cache, "version") - - // Assert - result - |> should() - |> be_some() - |> equal("2.0") - |> or_fail_with("set() should overwrite existing values") - }), - ]), + // Act + let result = cache_app.get(cache, "nonexistent") - describe("delete", [ - it("removes an existing key", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "temp", "data") - - // Act - cache_app.delete(cache, "temp") - let result = cache_app.get(cache, "temp") - - // Assert - result - |> should() - |> be_none() - |> or_fail_with("delete() should remove the key") - }), + // Assert + result + |> should + |> be_none() + |> or_fail_with("get() should return None for missing keys") + }), - it("does nothing when deleting a nonexistent key", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "keep", "this") - - // Act - cache_app.delete(cache, "nonexistent") - let kept = cache_app.get(cache, "keep") - let size = cache_app.size(cache) - - // Assert - kept - |> should() - |> be_some() - |> or_fail_with("Other keys should be unaffected") - - size - |> should() - |> equal(1) - |> or_fail_with("Size should remain unchanged") - }), - ]), + it("overwrites previous values for the same key", fn() { + // Arrange + let cache = 
cache_app.start() + cache_app.set(cache, "version", "1.0") - describe("clear", [ - it("removes all entries", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "a", "1") - cache_app.set(cache, "b", "2") - cache_app.set(cache, "c", "3") - - // Act - cache_app.clear(cache) - - // Assert - let size = cache_app.size(cache) - size - |> should() - |> equal(0) - |> or_fail_with("clear() should remove all entries") - }), + // Act + cache_app.set(cache, "version", "2.0") + let result = cache_app.get(cache, "version") - it("leaves cache usable after clearing", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "old", "data") - cache_app.clear(cache) - - // Act - cache_app.set(cache, "new", "data") - let result = cache_app.get(cache, "new") - - // Assert - result - |> should() - |> be_some() - |> equal("data") - |> or_fail_with("Cache should work normally after clear()") - }), - ]), + // Assert + result + |> should + |> be_some() + |> be_equal("2.0") + |> or_fail_with("set() should overwrite existing values") + }), + ]), - // ------------------------------------------------------------------------- - // Collection Operations - // ------------------------------------------------------------------------- - describe("keys", [ - it("returns empty list for new cache", fn() { - // Arrange - let cache = cache_app.start() - - // Act - let result = cache_app.keys(cache) - - // Assert - result - |> should() - |> be_empty() - |> or_fail_with("New cache should have no keys") - }), + group("delete", [ + it("removes an existing key", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "temp", "data") + + // Act + cache_app.delete(cache, "temp") + let result = cache_app.get(cache, "temp") + + // Assert + result + |> should + |> be_none() + |> or_fail_with("delete() should remove the key") + }), - it("returns all stored keys", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "first", "1") - 
cache_app.set(cache, "second", "2") - cache_app.set(cache, "third", "3") - - // Act - let result = cache_app.keys(cache) - - // Assert - result - |> should() - |> have_length(3) - |> or_fail_with("Should return all 3 keys") - - result - |> should() - |> contain("second") - |> or_fail_with("Keys should include 'second'") - }), - ]), + it("does nothing when deleting a nonexistent key", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "keep", "this") + + // Act + cache_app.delete(cache, "nonexistent") + let kept = cache_app.get(cache, "keep") + let size = cache_app.size(cache) + + // Assert + use _ <- result.try( + kept + |> should + |> be_some() + |> or_fail_with("Other keys should be unaffected"), + ) + + size + |> should + |> be_equal(1) + |> or_fail_with("Size should remain unchanged") + }), + ]), - describe("size", [ - it("returns zero for empty cache", fn() { - // Arrange - let cache = cache_app.start() + group("clear", [ + it("removes all entries", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "a", "1") + cache_app.set(cache, "b", "2") + cache_app.set(cache, "c", "3") + + // Act + cache_app.clear(cache) + + // Assert + let size = cache_app.size(cache) + size + |> should + |> be_equal(0) + |> or_fail_with("clear() should remove all entries") + }), - // Act - let result = cache_app.size(cache) + it("leaves cache usable after clearing", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "old", "data") + cache_app.clear(cache) - // Assert - result - |> should() - |> equal(0) - |> or_fail_with("Empty cache should have size 0") - }), + // Act + cache_app.set(cache, "new", "data") + let result = cache_app.get(cache, "new") - it("increases as items are added", fn() { - // Arrange - let cache = cache_app.start() - - // Act - cache_app.set(cache, "one", "1") - let after_one = cache_app.size(cache) - cache_app.set(cache, "two", "2") - let after_two = cache_app.size(cache) - - // Assert - 
after_one - |> should() - |> equal(1) - |> or_fail_with("Size should be 1 after first insert") - - after_two - |> should() - |> equal(2) - |> or_fail_with("Size should be 2 after second insert") - }), + // Assert + result + |> should + |> be_some() + |> be_equal("data") + |> or_fail_with("Cache should work normally after clear()") + }), + ]), - it("does not increase when overwriting", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "key", "first") + // ------------------------------------------------------------------------- + // Collection Operations + // ------------------------------------------------------------------------- + group("keys", [ + it("returns empty list for new cache", fn() { + // Arrange + let cache = cache_app.start() - // Act - cache_app.set(cache, "key", "second") - let result = cache_app.size(cache) + // Act + let result = cache_app.keys(cache) - // Assert - result - |> should() - |> equal(1) - |> or_fail_with("Overwriting should not increase size") - }), - ]), + // Assert + result + |> should + |> be_empty() + |> or_fail_with("New cache should have no keys") + }), - // ------------------------------------------------------------------------- - // Convenience Functions - // ------------------------------------------------------------------------- - describe("get_or", [ - it("returns stored value when key exists", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "exists", "actual") - - // Act - let result = cache_app.get_or(cache, "exists", "default") - - // Assert - result - |> should() - |> equal("actual") - |> or_fail_with("Should return actual value, not default") - }), + it("returns all stored keys", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "first", "1") + cache_app.set(cache, "second", "2") + cache_app.set(cache, "third", "3") + + // Act + let result = cache_app.keys(cache) + + // Assert + use _ <- result.try( + result + |> should + |> have_length(3) + 
|> or_fail_with("Should return all 3 keys"), + ) - it("returns default when key is missing", fn() { - // Arrange - let cache = cache_app.start() + result + |> should + |> contain("second") + |> or_fail_with("Keys should include 'second'") + }), + ]), - // Act - let result = cache_app.get_or(cache, "missing", "fallback") + group("size", [ + it("returns zero for empty cache", fn() { + // Arrange + let cache = cache_app.start() - // Assert - result - |> should() - |> equal("fallback") - |> or_fail_with("Should return default for missing key") - }), - ]), + // Act + let result = cache_app.size(cache) - describe("has", [ - it("returns true when key exists", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "present", "here") + // Assert + result + |> should + |> be_equal(0) + |> or_fail_with("Empty cache should have size 0") + }), - // Act - let result = cache_app.has(cache, "present") + it("increases as items are added", fn() { + // Arrange + let cache = cache_app.start() + + // Act + cache_app.set(cache, "one", "1") + let after_one = cache_app.size(cache) + cache_app.set(cache, "two", "2") + let after_two = cache_app.size(cache) + + // Assert + use _ <- result.try( + after_one + |> should + |> be_equal(1) + |> or_fail_with("Size should be 1 after first insert"), + ) + + after_two + |> should + |> be_equal(2) + |> or_fail_with("Size should be 2 after second insert") + }), - // Assert - result - |> should() - |> be_true() - |> or_fail_with("has() should return True for existing key") - }), + it("does not increase when overwriting", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "key", "first") + + // Act + cache_app.set(cache, "key", "second") + let result = cache_app.size(cache) + + // Assert + result + |> should + |> be_equal(1) + |> or_fail_with("Overwriting should not increase size") + }), + ]), - it("returns false when key is missing", fn() { - // Arrange - let cache = cache_app.start() + // 
------------------------------------------------------------------------- + // Convenience Functions + // ------------------------------------------------------------------------- + group("get_or", [ + it("returns stored value when key exists", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "exists", "actual") - // Act - let result = cache_app.has(cache, "absent") + // Act + let result = cache_app.get_or(cache, "exists", "default") - // Assert - result - |> should() - |> be_false() - |> or_fail_with("has() should return False for missing key") - }), - ]), + // Assert + result + |> should + |> be_equal("actual") + |> or_fail_with("Should return actual value, not default") + }), - describe("update", [ - it("transforms existing value and returns Ok", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "counter", 10) - - // Act - let result = cache_app.update(cache, "counter", fn(n) { n * 2 }) - - // Assert - result - |> should() - |> be_ok() - |> equal(20) - |> or_fail_with("update() should return Ok with new value") - }), + it("returns default when key is missing", fn() { + // Arrange + let cache = cache_app.start() - it("persists the transformed value", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "counter", 5) - - // Act - let _ = cache_app.update(cache, "counter", fn(n) { n + 3 }) - let result = cache_app.get(cache, "counter") - - // Assert - result - |> should() - |> be_some() - |> equal(8) - |> or_fail_with("Updated value should be persisted") - }), + // Act + let result = cache_app.get_or(cache, "missing", "fallback") - it("returns Error for missing key", fn() { - // Arrange - let cache = cache_app.start() + // Assert + result + |> should + |> be_equal("fallback") + |> or_fail_with("Should return default for missing key") + }), + ]), - // Act - let result = cache_app.update(cache, "missing", fn(n) { n + 1 }) + group("has", [ + it("returns true when key exists", fn() { + // Arrange 
+ let cache = cache_app.start() + cache_app.set(cache, "present", "here") - // Assert - result - |> should() - |> be_error() - |> or_fail_with("update() should return Error for missing key") - }), - ]), + // Act + let result = cache_app.has(cache, "present") - describe("pop", [ - it("returns and removes the value", fn() { - // Arrange - let cache = cache_app.start() - cache_app.set(cache, "temp", "data") - - // Act - let popped = cache_app.pop(cache, "temp") - let after = cache_app.get(cache, "temp") - - // Assert - popped - |> should() - |> be_some() - |> equal("data") - |> or_fail_with("pop() should return the value") - - after - |> should() - |> be_none() - |> or_fail_with("Key should be removed after pop()") - }), + // Assert + result + |> should + |> be_true() + |> or_fail_with("has() should return True for existing key") + }), - it("returns None for missing key", fn() { - // Arrange - let cache = cache_app.start() + it("returns false when key is missing", fn() { + // Arrange + let cache = cache_app.start() - // Act - let result = cache_app.pop(cache, "nonexistent") + // Act + let result = cache_app.has(cache, "absent") - // Assert - result - |> should() - |> be_none() - |> or_fail_with("pop() should return None for missing key") - }), - ]), + // Assert + result + |> should + |> be_false() + |> or_fail_with("has() should return False for missing key") + }), + ]), + + group("update", [ + it("transforms existing value and returns Ok", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "counter", 10) + + // Act + let result = cache_app.update(cache, "counter", fn(n) { n * 2 }) + + // Assert + result + |> should + |> be_ok() + |> be_equal(20) + |> or_fail_with("update() should return Ok with new value") + }), + + it("persists the transformed value", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "counter", 5) + + // Act + let _ = cache_app.update(cache, "counter", fn(n) { n + 3 }) + let result = 
cache_app.get(cache, "counter") + + // Assert + result + |> should + |> be_some() + |> be_equal(8) + |> or_fail_with("Updated value should be persisted") + }), + + it("returns Error for missing key", fn() { + // Arrange + let cache = cache_app.start() + + // Act + let result = cache_app.update(cache, "missing", fn(n) { n + 1 }) + + // Assert + result + |> should + |> be_error() + |> or_fail_with("update() should return Error for missing key") + }), + ]), + + group("pop", [ + it("returns and removes the value", fn() { + // Arrange + let cache = cache_app.start() + cache_app.set(cache, "temp", "data") + + // Act + let popped = cache_app.pop(cache, "temp") + let after = cache_app.get(cache, "temp") + + // Assert + use _ <- result.try( + popped + |> should + |> be_some() + |> be_equal("data") + |> or_fail_with("pop() should return the value"), + ) + + after + |> should + |> be_none() + |> or_fail_with("Key should be removed after pop()") + }), - // ------------------------------------------------------------------------- - // Lifecycle Hooks (Suite Mode Demo) - // ------------------------------------------------------------------------- - describe("lifecycle hooks demo", [ + it("returns None for missing key", fn() { + // Arrange + let cache = cache_app.start() + + // Act + let result = cache_app.pop(cache, "nonexistent") + + // Assert + result + |> should + |> be_none() + |> or_fail_with("pop() should return None for missing key") + }), + ]), + + // ------------------------------------------------------------------------- + // Nested Describe Blocks + // ------------------------------------------------------------------------- + group("nested organization", [ + group("outer group", [ + it("test at outer level", fn() { + // Arrange + let value = Some("outer") + + // Act & Assert + value + |> should + |> be_some() + |> be_equal("outer") + |> or_fail_with("Should be Some(\"outer\")") + }), + + group("inner group", [ + it("test at inner level", fn() { + // Arrange + let 
list = [1, 2, 3] + + // Act & Assert + use _ <- result.try( + list + |> should + |> have_length(3) + |> or_fail_with("List should have 3 elements"), + ) + + list + |> should + |> contain(2) + |> or_fail_with("List should contain 2") + }), + ]), + ]), + ]), + ]), + describe("Lifecycle hooks demo", [ before_all(fn() { - // Arrange (once for entire describe block) - io.println(" [before_all] Setting up shared test environment") - AssertionOk + // Arrange (once for entire suite) + Ok(Nil) }), before_each(fn() { // Arrange (before each test) - io.println(" [before_each] Preparing fresh state") - AssertionOk + Ok(Nil) }), it("first test runs after hooks", fn() { // Act & Assert - io.println(" [test] First test executing") True - |> should() + |> should |> be_true() |> or_fail_with("Test should pass") }), it("second test also gets fresh setup", fn() { // Act & Assert - io.println(" [test] Second test executing") 42 - |> should() + |> should |> be_greater_than(0) |> or_fail_with("42 should be positive") }), after_each(fn() { // Cleanup (after each test) - io.println(" [after_each] Cleaning up test state") - AssertionOk + Ok(Nil) }), after_all(fn() { - // Cleanup (once for entire describe block) - io.println(" [after_all] Tearing down shared environment") - AssertionOk + // Cleanup (once for entire suite) + Ok(Nil) }), ]), - - // ------------------------------------------------------------------------- - // Nested Describe Blocks - // ------------------------------------------------------------------------- - describe("nested organization", [ - describe("outer group", [ - it("test at outer level", fn() { - // Arrange - let value = Some("outer") - - // Act & Assert - value - |> should() - |> be_some() - |> equal("outer") - |> or_fail_with("Should be Some(\"outer\")") - }), - - describe("inner group", [ - it("test at inner level", fn() { - // Arrange - let list = [1, 2, 3] - - // Act & Assert - list - |> should() - |> have_length(3) - |> or_fail_with("List should have 3 
elements") - - list - |> should() - |> contain(2) - |> or_fail_with("List should contain 2") - }), - ]), - ]), - ]), - ]) + ] } // ============================================================================ @@ -500,11 +503,9 @@ pub fn main() { io.println("===============================") io.println("") - // Use suite mode for full lifecycle hook support - let results = - to_test_suite("cache_app_test", tests()) - |> run_suite() - - report(results, io.print) - exit_on_failure(results) + runner.new(suites()) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new() |> bdd.color()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/failure_showcase/README.md b/examples/failure_showcase/README.md new file mode 100644 index 0000000..f332d3c --- /dev/null +++ b/examples/failure_showcase/README.md @@ -0,0 +1,27 @@ +# Failure Showcase — Dream Test Reporting Demo + +This example project is intentionally **all failures**. + +Use it to quickly inspect how Dream Test renders: + +- assertion failures (with structured payloads) +- `Error("...")` aborts from test bodies +- hook failures (`before_all`, `before_each`, `after_each`) +- sandbox crashes (`panic`) +- timeouts +- Gherkin failures (undefined steps + failing assertions) + +## Run it + +```bash +cd examples/failure_showcase +gleam test +``` + +## Notes + +- This project is **not** part of `make all` in the repo root. +- The runner does **not** use `exit_on_failure()` so you can see every failure. +- The runner sets a low default timeout to guarantee the timeout example triggers. 
+ + diff --git a/examples/failure_showcase/features/failures.feature b/examples/failure_showcase/features/failures.feature new file mode 100644 index 0000000..12442a5 --- /dev/null +++ b/examples/failure_showcase/features/failures.feature @@ -0,0 +1,16 @@ +@failure_showcase +Feature: Failure Showcase + This feature file intentionally contains failing scenarios to exercise + Dream Test's Gherkin output. + + Scenario: Failing assertion in a step + Given a counter at 0 + When I increment the counter + Then the counter should be 2 + + Scenario: Undefined step + Given a counter at 0 + When I do something undefined + Then the counter should be 1 + + diff --git a/examples/failure_showcase/gleam.toml b/examples/failure_showcase/gleam.toml new file mode 100644 index 0000000..b3b890b --- /dev/null +++ b/examples/failure_showcase/gleam.toml @@ -0,0 +1,12 @@ +name = "failure_showcase" +version = "1.0.0" +description = "Example project containing only failing tests to showcase Dream Test reporting" + +[dependencies] +gleam_stdlib = ">= 0.44.0 and < 2.0.0" +gleam_erlang = ">= 1.3.0 and < 2.0.0" + +[dev-dependencies] +dream_test = { path = "../.." } + + diff --git a/examples/failure_showcase/manifest.toml b/examples/failure_showcase/manifest.toml new file mode 100644 index 0000000..b1b1985 --- /dev/null +++ b/examples/failure_showcase/manifest.toml @@ -0,0 +1,15 @@ +# This file was generated by Gleam +# You typically do not need to edit this file + +packages = [ + { name = "dream_test", version = "2.0.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_json", "gleam_otp", "gleam_regexp", "gleam_stdlib"], source = "local", path = "../.." 
}, + { name = "gleam_erlang", version = "1.3.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_erlang", source = "hex", outer_checksum = "1124AD3AA21143E5AF0FC5CF3D9529F6DB8CA03E43A55711B60B6B7B3874375C" }, + { name = "gleam_json", version = "3.1.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_json", source = "hex", outer_checksum = "44FDAA8847BE8FC48CA7A1C089706BD54BADCC4C45B237A992EDDF9F2CDB2836" }, + { name = "gleam_otp", version = "1.2.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_stdlib"], otp_app = "gleam_otp", source = "hex", outer_checksum = "BA6A294E295E428EC1562DC1C11EA7530DCB981E8359134BEABC8493B7B2258E" }, + { name = "gleam_regexp", version = "1.1.1", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_regexp", source = "hex", outer_checksum = "9C215C6CA84A5B35BB934A9B61A9A306EC743153BE2B0425A0D032E477B062A9" }, + { name = "gleam_stdlib", version = "0.67.1", build_tools = ["gleam"], requirements = [], otp_app = "gleam_stdlib", source = "hex", outer_checksum = "6CE3E4189A8B8EC2F73AB61A2FBDE49F159D6C9C61C49E3B3082E439F260D3D0" }, +] + +[requirements] +dream_test = { path = "../.." } +gleam_stdlib = { version = ">= 0.44.0 and < 2.0.0" } diff --git a/examples/failure_showcase/src/failure_showcase.gleam b/examples/failure_showcase/src/failure_showcase.gleam new file mode 100644 index 0000000..60488e7 --- /dev/null +++ b/examples/failure_showcase/src/failure_showcase.gleam @@ -0,0 +1,7 @@ +//// Failure showcase example. +//// +//// This project exists purely to demonstrate Dream Test failure rendering. 
+ +pub fn main() { + Nil +} diff --git a/examples/failure_showcase/test/failure_showcase_test.gleam b/examples/failure_showcase/test/failure_showcase_test.gleam new file mode 100644 index 0000000..7fc3692 --- /dev/null +++ b/examples/failure_showcase/test/failure_showcase_test.gleam @@ -0,0 +1,24 @@ +//// Failure Showcase — Dream Test Reporting Demo +//// +//// This runner intentionally executes only failing tests so you can quickly +//// inspect how Dream Test renders different failure modes. + +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gherkin_failures +import gleam/io +import unit_failures + +pub fn main() { + io.println("") + io.println("Failure Showcase — Dream Test Reporting Demo") + io.println("===========================================") + io.println("") + + runner.new([unit_failures.tests(), gherkin_failures.tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new() |> bdd.color()]) + |> runner.default_timeout_ms(10) + |> runner.run() +} diff --git a/examples/failure_showcase/test/gherkin_failures.gleam b/examples/failure_showcase/test/gherkin_failures.gleam new file mode 100644 index 0000000..f7e8f40 --- /dev/null +++ b/examples/failure_showcase/test/gherkin_failures.gleam @@ -0,0 +1,44 @@ +//// Gherkin failures for the failure showcase example. 
+ +import dream_test/gherkin/discover +import dream_test/gherkin/steps.{type StepContext, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} + +fn step_counter_at_zero(context: StepContext) { + put(context.world, "count", 0) + Ok(succeed()) +} + +fn step_increment_counter(context: StepContext) { + let current = get_or(context.world, "count", 0) + put(context.world, "count", current + 1) + Ok(succeed()) +} + +fn step_counter_should_be_two(context: StepContext) { + get_or(context.world, "count", 0) + |> should + |> be_equal(2) + |> or_fail_with("intentional gherkin failure: expected counter to be 2") +} + +fn step_counter_should_be_one(context: StepContext) { + get_or(context.world, "count", 0) + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1") +} + +pub fn tests() { + let registry = + steps.new() + |> step("a counter at 0", step_counter_at_zero) + |> step("I increment the counter", step_increment_counter) + |> step("the counter should be 2", step_counter_should_be_two) + |> step("the counter should be 1", step_counter_should_be_one) + + discover.features("features/*.feature") + |> discover.with_registry(registry) + |> discover.to_suite("Failure Showcase (gherkin)") +} diff --git a/examples/failure_showcase/test/snapshots/intentional_snapshot_failure.snap b/examples/failure_showcase/test/snapshots/intentional_snapshot_failure.snap new file mode 100644 index 0000000..98dd395 --- /dev/null +++ b/examples/failure_showcase/test/snapshots/intentional_snapshot_failure.snap @@ -0,0 +1,2 @@ +expected snapshot content + diff --git a/examples/failure_showcase/test/unit_failures.gleam b/examples/failure_showcase/test/unit_failures.gleam new file mode 100644 index 0000000..544529f --- /dev/null +++ b/examples/failure_showcase/test/unit_failures.gleam @@ -0,0 +1,238 @@ +//// Unit test failures for the failure showcase example. 
+ +import dream_test/matchers.{ + be_empty, be_equal, be_error, be_greater_than, be_less_than, be_none, be_ok, + be_some, be_true, contain, contain_string, end_with, have_length, match_regex, + match_snapshot, or_fail_with, should, start_with, succeed, +} +import dream_test/types.{ + type AssertionResult, type MatchResult, AssertionFailure, CustomMatcherFailure, + MatchFailed, MatchOk, +} +import dream_test/unit.{after_each, before_all, before_each, describe, group, it} +import gleam/erlang/process +import gleam/int +import gleam/option.{None, Some} + +pub fn tests() { + describe("Failure Showcase (unit)", [ + it("assertion payload: equality mismatch", fn() { + 1 + |> should + |> be_equal(2) + |> or_fail_with("intentional equality failure: 1 should equal 2") + }), + + it("assertion payload: boolean mismatch", fn() { + False + |> should + |> be_true() + |> or_fail_with("intentional boolean failure: expected True") + }), + + it("explicit Error(...) from test body", fn() { + Error("intentional Error(...) 
from test body") + }), + + it("sandbox crash (panic)", fn() { + panic as "intentional crash for failure showcase" + }), + + it("timeout (default timeout is set low in the runner)", fn() { + process.sleep(50) + Ok(succeed()) + }), + + // ========================================================================= + // FailurePayload coverage (matcher failures) + // ========================================================================= + it("assertion payload: option mismatch (be_some)", fail_option_be_some), + it("assertion payload: option mismatch (be_none)", fail_option_be_none), + it("assertion payload: result mismatch (be_ok)", fail_result_be_ok), + it("assertion payload: result mismatch (be_error)", fail_result_be_error), + it( + "assertion payload: collection mismatch (contain)", + fail_collection_contain, + ), + it( + "assertion payload: collection mismatch (have_length)", + fail_collection_have_length, + ), + it( + "assertion payload: collection mismatch (be_empty)", + fail_collection_be_empty, + ), + it( + "assertion payload: comparison mismatch (be_greater_than)", + fail_comparison_greater_than, + ), + it( + "assertion payload: comparison mismatch (be_less_than)", + fail_comparison_less_than, + ), + it( + "assertion payload: string mismatch (start_with)", + fail_string_start_with, + ), + it("assertion payload: string mismatch (end_with)", fail_string_end_with), + it( + "assertion payload: string mismatch (contain_string)", + fail_string_contain_string, + ), + it( + "assertion payload: string mismatch (match_regex)", + fail_string_match_regex, + ), + it("assertion payload: snapshot mismatch", fail_snapshot_mismatch), + it("assertion payload: custom matcher failure", fail_custom_matcher), + + group("hook failure: before_all", [ + before_all(fn() { Error("intentional before_all failure") }), + it("test 1 (will not run, should be marked failed)", fn() { + Ok(succeed()) + }), + it("test 2 (will not run, should be marked failed)", fn() { + Ok(succeed()) + }), + 
]), + + group("hook failure: before_each", [ + before_each(fn() { Error("intentional before_each failure") }), + it("will not run, should be marked failed", fn() { Ok(succeed()) }), + ]), + + group("hook failure: after_each", [ + after_each(fn() { Error("intentional after_each failure") }), + it("runs but fails during teardown", fn() { Ok(succeed()) }), + ]), + ]) +} + +fn fail_option_be_some() -> Result(AssertionResult, String) { + None + |> should + |> be_some() + |> or_fail_with("intentional OptionFailure: expected Some(_)") +} + +fn fail_option_be_none() -> Result(AssertionResult, String) { + Some(1) + |> should + |> be_none() + |> or_fail_with("intentional OptionFailure: expected None") +} + +fn fail_result_be_ok() -> Result(AssertionResult, String) { + Error("nope") + |> should + |> be_ok() + |> or_fail_with("intentional ResultFailure: expected Ok(_)") +} + +fn fail_result_be_error() -> Result(AssertionResult, String) { + Ok(1) + |> should + |> be_error() + |> or_fail_with("intentional ResultFailure: expected Error(_)") +} + +fn fail_collection_contain() -> Result(AssertionResult, String) { + [1, 2] + |> should + |> contain(3) + |> or_fail_with("intentional CollectionFailure: expected list to contain 3") +} + +fn fail_collection_have_length() -> Result(AssertionResult, String) { + [1, 2, 3] + |> should + |> have_length(2) + |> or_fail_with("intentional CollectionFailure: expected length 2") +} + +fn fail_collection_be_empty() -> Result(AssertionResult, String) { + [1] + |> should + |> be_empty() + |> or_fail_with("intentional CollectionFailure: expected empty list") +} + +fn fail_comparison_greater_than() -> Result(AssertionResult, String) { + 1 + |> should + |> be_greater_than(2) + |> or_fail_with("intentional ComparisonFailure: expected > 2") +} + +fn fail_comparison_less_than() -> Result(AssertionResult, String) { + 10 + |> should + |> be_less_than(3) + |> or_fail_with("intentional ComparisonFailure: expected < 3") +} + +fn fail_string_start_with() -> 
Result(AssertionResult, String) { + "hello" + |> should + |> start_with("world") + |> or_fail_with("intentional StringMatchFailure: expected prefix world") +} + +fn fail_string_end_with() -> Result(AssertionResult, String) { + "hello" + |> should + |> end_with(".gleam") + |> or_fail_with("intentional StringMatchFailure: expected suffix .gleam") +} + +fn fail_string_contain_string() -> Result(AssertionResult, String) { + "hello" + |> should + |> contain_string("world") + |> or_fail_with("intentional StringMatchFailure: expected substring world") +} + +fn fail_string_match_regex() -> Result(AssertionResult, String) { + "abc" + |> should + |> match_regex("^\\d+$") + |> or_fail_with( + "intentional StringMatchFailure: expected digits-only regex match", + ) +} + +fn fail_snapshot_mismatch() -> Result(AssertionResult, String) { + "actual snapshot content" + |> should + |> match_snapshot("./test/snapshots/intentional_snapshot_failure.snap") + |> or_fail_with("intentional SnapshotFailure: expected snapshot mismatch") +} + +fn fail_custom_matcher() -> Result(AssertionResult, String) { + 3 + |> should + |> be_even_int() + |> or_fail_with("intentional CustomMatcherFailure: expected even number") +} + +fn be_even_int(value_or_result: MatchResult(Int)) -> MatchResult(Int) { + case value_or_result { + MatchFailed(failure) -> MatchFailed(failure) + MatchOk(actual) -> check_is_even_int(actual) + } +} + +fn check_is_even_int(actual: Int) -> MatchResult(Int) { + case actual % 2 == 0 { + True -> MatchOk(actual) + False -> + MatchFailed(AssertionFailure( + operator: "be_even_int", + message: "", + payload: Some(CustomMatcherFailure( + actual: int.to_string(actual), + description: "expected an even number", + )), + )) + } +} diff --git a/examples/shopping_cart/README.md b/examples/shopping_cart/README.md index 380f1fa..75a0507 100644 --- a/examples/shopping_cart/README.md +++ b/examples/shopping_cart/README.md @@ -22,12 +22,16 @@ shopping_cart/ │ │ ├── checkout.gleam # Checkout steps │ 
│ └── assertions.gleam # Verification steps (Then) │ └── features/ -│ └── shopping_cart.gleam # Inline feature definition +│ ├── shopping_cart.gleam # Inline DSL feature definition +│ └── shopping_cart_file.gleam # Loads `features/*.feature` into a TestSuite └── README.md ``` ## Two Approaches +This example project runs **both** the `.feature` file(s) in `features/` and the +inline DSL suite. + ### 1. Gherkin Feature Files Write features in standard `.feature` files: @@ -76,18 +80,18 @@ pub fn register(registry: StepRegistry) -> StepRegistry { ### Cucumber Expression Placeholders -| Placeholder | Matches | Example | -|-------------|---------|---------| -| `{int}` | Integers | `5`, `42` | -| `{float}` | Decimals | `3.14`, `0.5` | -| `{string}` | Quoted text | `"hello"` | -| `{word}` | Single word | `apple` | +| Placeholder | Matches | Example | +| ----------- | ----------- | ------------- | +| `{int}` | Integers | `5`, `42` | +| `{float}` | Decimals | `3.14`, `0.5` | +| `{string}` | Quoted text | `"hello"` | +| `{word}` | Single word | `apple` | ## Running ```bash cd examples/shopping_cart -gleam test +make test ``` ## Output diff --git a/examples/shopping_cart/src/shopping_cart/failure_demo.gleam b/examples/shopping_cart/src/shopping_cart/failure_demo.gleam new file mode 100644 index 0000000..392eae5 --- /dev/null +++ b/examples/shopping_cart/src/shopping_cart/failure_demo.gleam @@ -0,0 +1,13 @@ +//// Failure demo (moved to tests). +//// +//// `dream_test` is a dev dependency in this example app, so `src/` modules +//// can’t import it. The intentional failure demo now lives in +//// `test/shopping_cart_test.gleam` behind an env var toggle: +//// +//// touch test/DEMO_FAILURES && gleam test +//// +//// This file is kept as a stub so older links don’t hard-error during builds. 
+ +pub fn main() { + Nil +} diff --git a/examples/shopping_cart/test/features/shopping_cart.gleam b/examples/shopping_cart/test/features/shopping_cart.gleam index 099fa57..3734215 100644 --- a/examples/shopping_cart/test/features/shopping_cart.gleam +++ b/examples/shopping_cart/test/features/shopping_cart.gleam @@ -4,16 +4,16 @@ import dream_test/gherkin/feature.{ and, background, feature_with_background, given, scenario, then, when, with_tags, } -import dream_test/gherkin/steps.{new_registry} +import dream_test/gherkin/steps import dream_test/types.{type TestSuite} import steps/assertions import steps/cart import steps/checkout import steps/discount -pub fn tests() -> TestSuite { +pub fn tests() -> TestSuite(Nil) { let registry = - new_registry() + steps.new() |> cart.register() |> discount.register() |> checkout.register() @@ -21,7 +21,7 @@ pub fn tests() -> TestSuite { let bg = background([given("I have an empty cart")]) - feature_with_background("Shopping Cart", registry, bg, [ + feature_with_background("Shopping Cart (inline)", registry, bg, [ scenario("Adding a single item to the cart", [ when("I add 2 apples to the cart"), then("the cart should contain 2 items"), @@ -58,6 +58,14 @@ pub fn tests() -> TestSuite { and("the total should be $5.40"), ]) |> with_tags(["discount"]), + scenario("Applying a fixed amount discount", [ + given("I add 2 oranges to the cart"), + when("I apply a $1.00 discount"), + then("the subtotal should be $4.00"), + and("the discount should be $1.00"), + and("the total should be $3.00"), + ]) + |> with_tags(["discount"]), scenario("Successful checkout", [ given("I add 1 apple to the cart"), and("I add 2 bananas to the cart"), @@ -70,7 +78,7 @@ pub fn tests() -> TestSuite { when("I checkout"), then("the checkout should fail with \"cart is empty\""), ]) - |> with_tags(["checkout", "error"]), + |> with_tags(["error"]), scenario("Cannot add zero items", [ when("I try to add 0 apples to the cart"), then("the operation should fail with 
\"invalid quantity\""), diff --git a/examples/shopping_cart/test/features/shopping_cart_file.gleam b/examples/shopping_cart/test/features/shopping_cart_file.gleam new file mode 100644 index 0000000..c83ba3b --- /dev/null +++ b/examples/shopping_cart/test/features/shopping_cart_file.gleam @@ -0,0 +1,25 @@ +//// Shopping Cart feature loaded from `.feature` files on disk. +//// +//// This example intentionally keeps the Gherkin `.feature` file(s) in +//// `features/` so you can see the full file-based workflow. + +import dream_test/gherkin/discover +import dream_test/gherkin/steps +import dream_test/types.{type TestSuite} +import steps/assertions +import steps/cart +import steps/checkout +import steps/discount + +pub fn tests() -> TestSuite(Nil) { + let registry = + steps.new() + |> cart.register() + |> discount.register() + |> checkout.register() + |> assertions.register() + + discover.features("features/*.feature") + |> discover.with_registry(registry) + |> discover.to_suite("Shopping Cart (file)") +} diff --git a/examples/shopping_cart/test/shopping_cart_test.gleam b/examples/shopping_cart/test/shopping_cart_test.gleam index d3fb423..41ab308 100644 --- a/examples/shopping_cart/test/shopping_cart_test.gleam +++ b/examples/shopping_cart/test/shopping_cart_test.gleam @@ -2,9 +2,11 @@ //// //// Run with: gleam test -import dream_test/reporter/bdd.{report} -import dream_test/runner.{exit_on_failure, run_suite} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import features/shopping_cart as shopping_cart_feature +import features/shopping_cart_file as shopping_cart_file_feature import gleam/io pub fn main() { @@ -13,8 +15,9 @@ pub fn main() { io.println("====================================") io.println("") - shopping_cart_feature.tests() - |> run_suite() - |> report(io.print) - |> exit_on_failure() + runner.new([shopping_cart_feature.tests(), shopping_cart_file_feature.tests()]) + |> 
runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new() |> bdd.color()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/shopping_cart/test/steps/assertions.gleam b/examples/shopping_cart/test/steps/assertions.gleam index 5bd1825..36b5242 100644 --- a/examples/shopping_cart/test/steps/assertions.gleam +++ b/examples/shopping_cart/test/steps/assertions.gleam @@ -1,10 +1,10 @@ //// Assertion/verification steps (Then steps). -import dream_test/assertions/should.{be_true, equal, or_fail_with, should} import dream_test/gherkin/steps.{ type StepContext, type StepRegistry, get_float, get_int, get_string, } import dream_test/gherkin/world.{get, get_or} +import dream_test/matchers.{be_equal, be_true, or_fail_with, should} import dream_test/types.{type AssertionResult} import gleam/float import gleam/int @@ -13,6 +13,10 @@ import shopping_cart/cart import shopping_cart/pricing import shopping_cart/types as cart_types +fn no_checkout_result_message(_e: e) -> String { + "No checkout result found" +} + // ============================================================================ // Step Registration // ============================================================================ @@ -39,107 +43,103 @@ pub fn register(registry: StepRegistry) -> StepRegistry { // Step Implementations // ============================================================================ -fn step_verify_item_count(context: StepContext) -> AssertionResult { +fn step_verify_item_count( + context: StepContext, +) -> Result(AssertionResult, String) { let expected = get_int(context.captures, 0) |> result.unwrap(0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let actual = cart.item_count(the_cart) actual - |> should() - |> equal(expected) + |> should + |> be_equal(expected) |> or_fail_with("Cart should contain " <> int.to_string(expected) <> " items") } -fn step_verify_subtotal(context: StepContext) -> AssertionResult { +fn 
step_verify_subtotal(context: StepContext) -> Result(AssertionResult, String) { let expected = get_float(context.captures, 0) |> result.unwrap(0.0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let actual = pricing.subtotal(the_cart) floats_equal(actual, expected) - |> should() + |> should |> be_true() |> or_fail_with("Subtotal should be $" <> float.to_string(expected)) } -fn step_verify_discount(context: StepContext) -> AssertionResult { +fn step_verify_discount(context: StepContext) -> Result(AssertionResult, String) { let expected = get_float(context.captures, 0) |> result.unwrap(0.0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let actual = pricing.discount_amount(the_cart) floats_equal(actual, expected) - |> should() + |> should |> be_true() |> or_fail_with("Discount should be $" <> float.to_string(expected)) } -fn step_verify_total(context: StepContext) -> AssertionResult { +fn step_verify_total(context: StepContext) -> Result(AssertionResult, String) { let expected = get_float(context.captures, 0) |> result.unwrap(0.0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let actual = pricing.total(the_cart) floats_equal(actual, expected) - |> should() + |> should |> be_true() |> or_fail_with("Total should be $" <> float.to_string(expected)) } -fn step_verify_checkout_success(context: StepContext) -> AssertionResult { - let success: Bool = get_or(context.world, "checkout_success", False) +fn step_verify_checkout_success( + context: StepContext, +) -> Result(AssertionResult, String) { + let checkout_succeeded: Bool = + get_or(context.world, "checkout_success", False) - success - |> should() + checkout_succeeded + |> should |> be_true() |> or_fail_with("Checkout should succeed") } -fn step_verify_checkout_failure(context: StepContext) -> AssertionResult { +fn step_verify_checkout_failure( + context: StepContext, +) -> Result(AssertionResult, String) { let expected_error = 
get_string(context.captures, 0) |> result.unwrap("") - let success: Bool = get_or(context.world, "checkout_success", True) + let checkout_succeeded: Bool = get_or(context.world, "checkout_success", True) let actual_error: String = get_or(context.world, "checkout_error", "") - case success { - True -> { - False - |> should() - |> be_true() - |> or_fail_with("Checkout should have failed") - } - False -> { - actual_error - |> should() - |> equal(expected_error) - |> or_fail_with("Error should be: " <> expected_error) - } - } + #(checkout_succeeded, actual_error) + |> should + |> be_equal(#(False, expected_error)) + |> or_fail_with("Checkout should fail with: " <> expected_error) } -fn step_verify_order_total(context: StepContext) -> AssertionResult { +fn step_verify_order_total( + context: StepContext, +) -> Result(AssertionResult, String) { let expected = get_float(context.captures, 0) |> result.unwrap(0.0) - case get(context.world, "checkout_result") { - Ok(checkout_result) -> { - let the_checkout: cart_types.CheckoutResult = checkout_result - floats_equal(the_checkout.total, expected) - |> should() - |> be_true() - |> or_fail_with("Order total should be $" <> float.to_string(expected)) - } - Error(_) -> { - False - |> should() - |> be_true() - |> or_fail_with("No checkout result found") - } - } + use checkout_result <- result.try( + get(context.world, "checkout_result") + |> result.map_error(no_checkout_result_message), + ) + let the_checkout: cart_types.CheckoutResult = checkout_result + + floats_equal(the_checkout.total, expected) + |> should + |> be_true() + |> or_fail_with("Order total should be $" <> float.to_string(expected)) } -fn step_verify_operation_failure(context: StepContext) -> AssertionResult { +fn step_verify_operation_failure( + context: StepContext, +) -> Result(AssertionResult, String) { let expected_error = get_string(context.captures, 0) |> result.unwrap("") let actual_error: String = get_or(context.world, "last_error", "") actual_error - |> 
should() - |> equal(expected_error) + |> should + |> be_equal(expected_error) |> or_fail_with("Operation should fail with: " <> expected_error) } diff --git a/examples/shopping_cart/test/steps/cart.gleam b/examples/shopping_cart/test/steps/cart.gleam index 4fc7584..72f7f68 100644 --- a/examples/shopping_cart/test/steps/cart.gleam +++ b/examples/shopping_cart/test/steps/cart.gleam @@ -4,7 +4,8 @@ import dream_test/gherkin/steps.{ type StepContext, type StepRegistry, get_int, get_string, } import dream_test/gherkin/world.{get_or, put} -import dream_test/types.{type AssertionResult, AssertionOk} +import dream_test/matchers.{succeed} +import dream_test/types.{type AssertionResult} import gleam/result import gleam/string import shopping_cart/cart @@ -28,30 +29,27 @@ pub fn register(registry: StepRegistry) -> StepRegistry { // Step Implementations // ============================================================================ -fn step_empty_cart(context: StepContext) -> AssertionResult { +fn step_empty_cart(context: StepContext) -> Result(AssertionResult, String) { put(context.world, "cart", cart.new()) - AssertionOk + Ok(succeed()) } -fn step_add_product(context: StepContext) -> AssertionResult { +fn step_add_product(context: StepContext) -> Result(AssertionResult, String) { let quantity = get_int(context.captures, 0) |> result.unwrap(0) let product = get_string(context.captures, 1) |> result.unwrap("") |> lookup_product() let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) - case cart.add_item(the_cart, product, quantity) { - Ok(updated) -> { - put(context.world, "cart", updated) - AssertionOk - } - Error(_) -> { - put(context.world, "last_error", "add_item failed") - AssertionOk - } - } + use updated <- result.try( + cart.add_item(the_cart, product, quantity) + |> result.map_error(add_item_error_to_string), + ) + + put(context.world, "cart", updated) + Ok(succeed()) } -fn step_try_add_product(context: StepContext) -> AssertionResult { +fn 
step_try_add_product(context: StepContext) -> Result(AssertionResult, String) { let quantity = get_int(context.captures, 0) |> result.unwrap(0) let product = get_string(context.captures, 1) |> result.unwrap("") |> lookup_product() @@ -61,20 +59,24 @@ fn step_try_add_product(context: StepContext) -> AssertionResult { Ok(updated) -> { put(context.world, "cart", updated) put(context.world, "last_error", "") - AssertionOk - } - Error(cart_types.InvalidQuantity) -> { - put(context.world, "last_error", "invalid quantity") - AssertionOk + Ok(succeed()) } - Error(_) -> { - put(context.world, "last_error", "unknown error") - AssertionOk + + Error(e) -> { + put(context.world, "last_error", add_item_error_to_string(e)) + Ok(succeed()) } } } -fn step_update_quantity(context: StepContext) -> AssertionResult { +fn add_item_error_to_string(e) -> String { + case e { + cart_types.InvalidQuantity -> "invalid quantity" + _ -> "add_item failed" + } +} + +fn step_update_quantity(context: StepContext) -> Result(AssertionResult, String) { let product_name = get_string(context.captures, 0) |> result.unwrap("") let quantity = get_int(context.captures, 1) |> result.unwrap(0) let product_id = normalize_product_id(product_name) @@ -83,16 +85,16 @@ fn step_update_quantity(context: StepContext) -> AssertionResult { case cart.update_quantity(the_cart, product_id, quantity) { Ok(updated) -> { put(context.world, "cart", updated) - AssertionOk + Ok(succeed()) } Error(_) -> { put(context.world, "last_error", "update failed") - AssertionOk + Ok(succeed()) } } } -fn step_remove_product(context: StepContext) -> AssertionResult { +fn step_remove_product(context: StepContext) -> Result(AssertionResult, String) { let product_name = get_string(context.captures, 0) |> result.unwrap("") let product_id = normalize_product_id(product_name) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) @@ -100,11 +102,11 @@ fn step_remove_product(context: StepContext) -> AssertionResult { case 
cart.remove_item(the_cart, product_id) { Ok(updated) -> { put(context.world, "cart", updated) - AssertionOk + Ok(succeed()) } Error(_) -> { put(context.world, "last_error", "remove failed") - AssertionOk + Ok(succeed()) } } } diff --git a/examples/shopping_cart/test/steps/checkout.gleam b/examples/shopping_cart/test/steps/checkout.gleam index 8ce958b..65e454e 100644 --- a/examples/shopping_cart/test/steps/checkout.gleam +++ b/examples/shopping_cart/test/steps/checkout.gleam @@ -2,7 +2,8 @@ import dream_test/gherkin/steps.{type StepContext, type StepRegistry} import dream_test/gherkin/world.{get_or, put} -import dream_test/types.{type AssertionResult, AssertionOk} +import dream_test/matchers.{succeed} +import dream_test/types.{type AssertionResult} import shopping_cart/cart import shopping_cart/checkout import shopping_cart/types as cart_types @@ -20,24 +21,24 @@ pub fn register(registry: StepRegistry) -> StepRegistry { // Step Implementations // ============================================================================ -fn step_checkout(context: StepContext) -> AssertionResult { +fn step_checkout(context: StepContext) -> Result(AssertionResult, String) { let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) case checkout.checkout(the_cart) { Ok(checkout_result) -> { put(context.world, "checkout_result", checkout_result) put(context.world, "checkout_success", True) - AssertionOk + Ok(succeed()) } Error(cart_types.CartEmpty) -> { put(context.world, "checkout_success", False) put(context.world, "checkout_error", "cart is empty") - AssertionOk + Ok(succeed()) } Error(_) -> { put(context.world, "checkout_success", False) put(context.world, "checkout_error", "checkout failed") - AssertionOk + Ok(succeed()) } } } diff --git a/examples/shopping_cart/test/steps/discount.gleam b/examples/shopping_cart/test/steps/discount.gleam index f854d83..d21fb90 100644 --- a/examples/shopping_cart/test/steps/discount.gleam +++ 
b/examples/shopping_cart/test/steps/discount.gleam @@ -4,7 +4,8 @@ import dream_test/gherkin/steps.{ type StepContext, type StepRegistry, get_float, get_int, } import dream_test/gherkin/world.{get_or, put} -import dream_test/types.{type AssertionResult, AssertionOk} +import dream_test/matchers.{succeed} +import dream_test/types.{type AssertionResult} import gleam/int import gleam/result import shopping_cart/cart @@ -25,7 +26,9 @@ pub fn register(registry: StepRegistry) -> StepRegistry { // Step Implementations // ============================================================================ -fn step_apply_percent_discount(context: StepContext) -> AssertionResult { +fn step_apply_percent_discount( + context: StepContext, +) -> Result(AssertionResult, String) { let percent = get_int(context.captures, 0) |> result.unwrap(0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let discount = cart_types.PercentOff(int.to_float(percent)) @@ -33,16 +36,18 @@ fn step_apply_percent_discount(context: StepContext) -> AssertionResult { case pricing.apply_discount(the_cart, discount) { Ok(updated) -> { put(context.world, "cart", updated) - AssertionOk + Ok(succeed()) } Error(_) -> { put(context.world, "last_error", "discount failed") - AssertionOk + Ok(succeed()) } } } -fn step_apply_fixed_discount(context: StepContext) -> AssertionResult { +fn step_apply_fixed_discount( + context: StepContext, +) -> Result(AssertionResult, String) { let amount = get_float(context.captures, 0) |> result.unwrap(0.0) let the_cart: cart_types.Cart = get_or(context.world, "cart", cart.new()) let discount = cart_types.FixedAmount(amount) @@ -50,11 +55,11 @@ fn step_apply_fixed_discount(context: StepContext) -> AssertionResult { case pricing.apply_discount(the_cart, discount) { Ok(updated) -> { put(context.world, "cart", updated) - AssertionOk + Ok(succeed()) } Error(_) -> { put(context.world, "last_error", "discount failed") - AssertionOk + Ok(succeed()) } } } diff --git 
a/examples/snippets/README.md b/examples/snippets/README.md new file mode 100644 index 0000000..fa9cd4c --- /dev/null +++ b/examples/snippets/README.md @@ -0,0 +1,63 @@ +## Dream Test snippets + +This project contains **small, compile-ready examples** for every `dream_test` feature. + +### Running + +- **Run all snippet tests** (recommended): + - `gleam test` +- **Run a single snippet as an app** (for snippets that have `main`): + - `gleam run -m snippets/unit/quick_start` + - `gleam run -m snippets/reporters/progress_reporter` + - `gleam run -m snippets/reporters/gherkin_reporter` + - `gleam run -m snippets/hooks/context_aware_tests` + +### Feature map (one file per feature) + +- **Unit DSL (0-arg tests + hooks)**: + - `test/snippets/unit/quick_start.gleam` + - `test/snippets/unit/hero.gleam` + - `test/snippets/unit/chaining.gleam` + - `test/snippets/unit/skipping_tests.gleam` + - `test/snippets/hooks/lifecycle_hooks.gleam` + - `test/snippets/hooks/hook_inheritance.gleam` + - `test/snippets/hooks/hook_failure.gleam` + - `test/snippets/unit/explicit_failures.gleam` +- **Runner configuration (parallel/sequential, timeouts, exit-on-failure)**: + - `test/snippets/runner/runner_config.gleam` + - `test/snippets/runner/sequential_execution.gleam` + - `test/snippets/runner/execution_modes.gleam` +- **Reporters**: + - **Progress (live) + BDD (end-of-run)**: `test/snippets/reporters/bdd_reporter.gleam` (and `snippets_test.gleam` uses progress + BDD by default) + - **JSON (end-of-run)**: `test/snippets/reporters/json_reporter.gleam` + - **BDD (formatting)**: `test/snippets/reporters/bdd_formatting.gleam` + - **Progress bar (live)**: `test/snippets/reporters/progress_reporter.gleam` + - **Gherkin formatting**: `test/snippets/reporters/gherkin_reporter.gleam` (runs, then formats results) + - **JSON (formatting)**: `test/snippets/reporters/json_formatting.gleam` + - **Reporter events (advanced/custom tooling)**: `test/snippets/reporters/reporter_api_handle_event.gleam` +- 
**Sandboxing (timeouts + crash isolation)**: + - `test/snippets/utils/sandboxing.gleam` +- **Timing helpers**: + - `test/snippets/utils/timing_helpers.gleam` +- **File helpers**: + - `test/snippets/utils/file_helpers.gleam` +- **Process helpers (actors, unique ports)**: + - `test/snippets/utils/process_helpers.gleam` +- **Parallel executor (direct usage)**: + - `test/snippets/utils/parallel_direct.gleam` +- **Core types helpers**: + - `test/snippets/utils/types_helpers.gleam` +- **Custom matchers**: + - `test/snippets/matchers/custom_matchers.gleam` +- **Snapshot testing**: + - `test/snippets/matchers/snapshots.gleam` +- **Gherkin (inline DSL, placeholders, step registry, discovery, feature files)**: + - `test/snippets/gherkin/gherkin_hero.gleam` + - `test/snippets/gherkin/gherkin_feature.gleam` + - `test/snippets/gherkin/gherkin_step_handler.gleam` + - `test/snippets/gherkin/gherkin_placeholders.gleam` + - `test/snippets/gherkin/gherkin_file.gleam` (parsing `.feature` files) + - `test/snippets/gherkin/gherkin_discover.gleam` (glob discovery) +- **Context-aware tests (`unit_context`)**: + - `test/snippets/hooks/context_aware_tests.gleam` + - Note: uses a custom context type, so it’s run standalone via `gleam run -m snippets/hooks/context_aware_tests`. diff --git a/examples/snippets/gleam.toml b/examples/snippets/gleam.toml index 9bee57e..3ba1560 100644 --- a/examples/snippets/gleam.toml +++ b/examples/snippets/gleam.toml @@ -6,6 +6,8 @@ description = "README code examples - these are tested to ensure they compile" gleam_stdlib = ">= 0.44.0 and < 2.0.0" gleam_regexp = ">= 1.0.0 and < 2.0.0" dream_test = { path = "../.." 
} +gleam_erlang = ">= 1.3.0 and < 2.0.0" +gleam_otp = ">= 1.2.0 and < 2.0.0" [dev-dependencies] diff --git a/examples/snippets/manifest.toml b/examples/snippets/manifest.toml index 75a1d7d..d0b9674 100644 --- a/examples/snippets/manifest.toml +++ b/examples/snippets/manifest.toml @@ -2,15 +2,17 @@ # You typically do not need to edit this file packages = [ - { name = "dream_test", version = "1.1.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_json", "gleam_otp", "gleam_regexp", "gleam_stdlib"], source = "local", path = "../.." }, + { name = "dream_test", version = "2.0.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_json", "gleam_otp", "gleam_regexp", "gleam_stdlib"], source = "local", path = "../.." }, { name = "gleam_erlang", version = "1.3.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_erlang", source = "hex", outer_checksum = "1124AD3AA21143E5AF0FC5CF3D9529F6DB8CA03E43A55711B60B6B7B3874375C" }, { name = "gleam_json", version = "3.1.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_json", source = "hex", outer_checksum = "44FDAA8847BE8FC48CA7A1C089706BD54BADCC4C45B237A992EDDF9F2CDB2836" }, { name = "gleam_otp", version = "1.2.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_stdlib"], otp_app = "gleam_otp", source = "hex", outer_checksum = "BA6A294E295E428EC1562DC1C11EA7530DCB981E8359134BEABC8493B7B2258E" }, { name = "gleam_regexp", version = "1.1.1", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_regexp", source = "hex", outer_checksum = "9C215C6CA84A5B35BB934A9B61A9A306EC743153BE2B0425A0D032E477B062A9" }, - { name = "gleam_stdlib", version = "0.67.0", build_tools = ["gleam"], requirements = [], otp_app = "gleam_stdlib", source = "hex", outer_checksum = "6368313DB35963DC02F677A513BB0D95D58A34ED0A9436C8116820BF94BE3511" }, + { name = "gleam_stdlib", version = "0.67.1", build_tools = ["gleam"], requirements = [], 
otp_app = "gleam_stdlib", source = "hex", outer_checksum = "6CE3E4189A8B8EC2F73AB61A2FBDE49F159D6C9C61C49E3B3082E439F260D3D0" }, ] [requirements] dream_test = { path = "../.." } +gleam_erlang = { version = ">= 1.3.0 and < 2.0.0" } gleam_regexp = { version = ">= 1.0.0 and < 2.0.0" } gleam_stdlib = { version = ">= 0.44.0 and < 2.0.0" } +gleam_otp = { version = ">= 1.2.0 and < 2.0.0" } diff --git a/examples/snippets/src/snippets.gleam b/examples/snippets/src/snippets.gleam index caeb75a..781aaa5 100644 --- a/examples/snippets/src/snippets.gleam +++ b/examples/snippets/src/snippets.gleam @@ -1,5 +1,3 @@ -//// Simple calculator for README examples. - /// Add two numbers pub fn add(a: Int, b: Int) -> Int { a + b diff --git a/examples/snippets/test/cart.feature b/examples/snippets/test/cart.feature index 372add9..33dc451 100644 --- a/examples/snippets/test/cart.feature +++ b/examples/snippets/test/cart.feature @@ -3,19 +3,17 @@ Feature: Shopping Cart As a customer I want to add items to my cart Background: - Given I have an empty cart + Given the server is running @smoke Scenario: Adding items + Given the cart is empty When I add 3 items Then the cart should have 3 items Scenario: Adding multiple times + Given the cart is empty When I add 2 items And I add 3 items Then the cart should have 5 items - - - - diff --git a/examples/snippets/test/chaining.gleam b/examples/snippets/test/chaining.gleam deleted file mode 100644 index b08ab1e..0000000 --- a/examples/snippets/test/chaining.gleam +++ /dev/null @@ -1,35 +0,0 @@ -//// README: Chaining matchers - -import dream_test/assertions/should.{be_ok, be_some, equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io -import gleam/option.{Some} - -pub fn tests() { - describe("Chaining matchers", [ - // Unwrap Some, then check the value - it("unwraps Option", fn() { - Some(42) - |> should() - |> be_some() - |> 
equal(42) - |> or_fail_with("Should contain 42") - }), - // Unwrap Ok, then check the value - it("unwraps Result", fn() { - Ok("success") - |> should() - |> be_ok() - |> equal("success") - |> or_fail_with("Should be Ok with 'success'") - }), - ]) -} - -pub fn main() { - to_test_cases("chaining", tests()) - |> run_all() - |> report(io.print) -} diff --git a/examples/snippets/test/execution_modes.gleam b/examples/snippets/test/execution_modes.gleam deleted file mode 100644 index 5e84aae..0000000 --- a/examples/snippets/test/execution_modes.gleam +++ /dev/null @@ -1,36 +0,0 @@ -//// README: Execution modes (flat vs suite) - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all, run_suite} -import dream_test/unit.{describe, it, to_test_cases, to_test_suite} -import gleam/io - -pub fn tests() { - describe("Execution modes demo", [ - it("works in both modes", fn() { - 1 + 1 - |> should() - |> equal(2) - |> or_fail_with("Math works") - }), - ]) -} - -// Flat mode - simpler, faster -pub fn run_flat_mode() { - to_test_cases("my_test", tests()) - |> run_all() - |> report(io.print) -} - -// Suite mode - preserves group structure for before_all/after_all -pub fn run_suite_mode() { - to_test_suite("my_test", tests()) - |> run_suite() - |> report(io.print) -} - -pub fn main() { - run_flat_mode() -} diff --git a/examples/snippets/test/gherkin_discover.gleam b/examples/snippets/test/gherkin_discover.gleam deleted file mode 100644 index 4488091..0000000 --- a/examples/snippets/test/gherkin_discover.gleam +++ /dev/null @@ -1,51 +0,0 @@ -//// README: Gherkin feature discovery example - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/gherkin/discover -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} -import dream_test/gherkin/world.{get_or, put} -import dream_test/reporter/gherkin.{report} -import dream_test/runner.{run_suite} 
-import dream_test/types.{type AssertionResult, AssertionOk} -import gleam/io -import gleam/result - -fn step_empty_cart(context: StepContext) -> AssertionResult { - put(context.world, "cart", 0) - AssertionOk -} - -fn step_add_items(context: StepContext) -> AssertionResult { - let current = get_or(context.world, "cart", 0) - let to_add = get_int(context.captures, 0) |> result.unwrap(0) - put(context.world, "cart", current + to_add) - AssertionOk -} - -fn step_verify_count(context: StepContext) -> AssertionResult { - let expected = get_int(context.captures, 0) |> result.unwrap(0) - get_or(context.world, "cart", 0) - |> should() - |> equal(expected) - |> or_fail_with("Cart count mismatch") -} - -pub fn tests() { - // Define step handlers - let steps = - new_registry() - |> step("I have an empty cart", step_empty_cart) - |> step("I add {int} items", step_add_items) - |> step("the cart should have {int} items", step_verify_count) - - // Discover and load all .feature files - discover.features("test/*.feature") - |> discover.with_registry(steps) - |> discover.to_suite("cart_features") -} - -pub fn main() { - tests() - |> run_suite() - |> report(io.print) -} diff --git a/examples/snippets/test/gherkin_feature.gleam b/examples/snippets/test/gherkin_feature.gleam deleted file mode 100644 index 7bf8e92..0000000 --- a/examples/snippets/test/gherkin_feature.gleam +++ /dev/null @@ -1,65 +0,0 @@ -//// README: Gherkin feature definition example - -import dream_test/assertions/should.{fail_with} -import dream_test/gherkin/feature.{ - and, background, feature_with_background, given, scenario, then, when, - with_tags, -} -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} -import dream_test/gherkin/world.{get_or, put} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/types.{type AssertionResult, AssertionOk} -import gleam/int -import gleam/io -import gleam/result - -fn step_empty_cart(context: 
StepContext) -> AssertionResult { - put(context.world, "cart", 0) - AssertionOk -} - -fn step_add_items(context: StepContext) -> AssertionResult { - let current = get_or(context.world, "cart", 0) - let to_add = get_int(context.captures, 0) |> result.unwrap(0) - put(context.world, "cart", current + to_add) - AssertionOk -} - -fn step_verify_count(context: StepContext) -> AssertionResult { - let expected = get_int(context.captures, 0) |> result.unwrap(0) - let actual = get_or(context.world, "cart", 0) - case actual == expected { - True -> AssertionOk - False -> fail_with("Expected " <> int.to_string(expected) <> " items") - } -} - -pub fn tests() { - let steps = - new_registry() - |> step("I have an empty cart", step_empty_cart) - |> step("I add {int} items", step_add_items) - |> step("I should have {int} items", step_verify_count) - - let bg = background([given("I have an empty cart")]) - - feature_with_background("Shopping Cart", steps, bg, [ - scenario("Adding items", [ - when("I add 3 items"), - then("I should have 3 items"), - ]) - |> with_tags(["smoke"]), - scenario("Adding more items", [ - when("I add 2 items"), - and("I add 3 items"), - then("I should have 5 items"), - ]), - ]) -} - -pub fn main() { - tests() - |> run_suite() - |> report(io.print) -} diff --git a/examples/snippets/test/hook_failure.gleam b/examples/snippets/test/hook_failure.gleam deleted file mode 100644 index 878b044..0000000 --- a/examples/snippets/test/hook_failure.gleam +++ /dev/null @@ -1,31 +0,0 @@ -//// README: Hook failure behavior - -import dream_test/assertions/should.{fail_with, succeed} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/unit.{before_all, describe, it, to_test_suite} -import gleam/io - -fn connect_to_database() { - Ok(Nil) -} - -pub fn tests() { - describe("Handles failures", [ - before_all(fn() { - case connect_to_database() { - Ok(_) -> succeed() - Error(e) -> fail_with("Database connection failed: " <> e) - } - }), 
- // If before_all fails, these tests are marked SetupFailed (not run) - it("test1", fn() { succeed() }), - it("test2", fn() { succeed() }), - ]) -} - -pub fn main() { - to_test_suite("hook_failure", tests()) - |> run_suite() - |> report(io.print) -} diff --git a/examples/snippets/test/hook_inheritance.gleam b/examples/snippets/test/hook_inheritance.gleam deleted file mode 100644 index 78f877e..0000000 --- a/examples/snippets/test/hook_inheritance.gleam +++ /dev/null @@ -1,40 +0,0 @@ -//// README: Hook inheritance - -import dream_test/assertions/should.{succeed} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/unit.{after_each, before_each, describe, it, to_test_suite} -import gleam/io - -pub fn tests() { - describe("Outer", [ - before_each(fn() { - io.println("1. outer setup") - succeed() - }), - after_each(fn() { - io.println("4. outer teardown") - succeed() - }), - describe("Inner", [ - before_each(fn() { - io.println("2. inner setup") - succeed() - }), - after_each(fn() { - io.println("3. 
inner teardown") - succeed() - }), - it("test", fn() { - io.println("(test)") - succeed() - }), - ]), - ]) -} - -pub fn main() { - to_test_suite("hook_inheritance", tests()) - |> run_suite() - |> report(io.print) -} diff --git a/examples/snippets/test/json_reporter.gleam b/examples/snippets/test/json_reporter.gleam deleted file mode 100644 index 6720b41..0000000 --- a/examples/snippets/test/json_reporter.gleam +++ /dev/null @@ -1,30 +0,0 @@ -//// README: JSON reporter example - -import dream_test/assertions/should.{succeed} -import dream_test/reporter/bdd.{report} -import dream_test/reporter/json -import dream_test/runner.{exit_on_failure, run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io - -pub fn tests() { - describe("JSON Reporter", [ - it("outputs JSON format", fn() { - // The json.report function outputs machine-readable JSON - // while bdd.report outputs human-readable text - succeed() - }), - it("includes test metadata", fn() { - // JSON output includes name, full_name, status, duration, tags - succeed() - }), - ]) -} - -pub fn main() { - to_test_cases("json_reporter", tests()) - |> run_all() - |> report(io.print) - |> json.report_pretty(io.println) - |> exit_on_failure() -} diff --git a/examples/snippets/test/lifecycle_hooks.gleam b/examples/snippets/test/lifecycle_hooks.gleam deleted file mode 100644 index 3fa46c5..0000000 --- a/examples/snippets/test/lifecycle_hooks.gleam +++ /dev/null @@ -1,48 +0,0 @@ -//// README: Lifecycle hooks - -import dream_test/assertions/should.{be_empty, or_fail_with, should, succeed} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/unit.{ - after_all, after_each, before_all, before_each, describe, it, to_test_suite, -} -import gleam/io - -pub fn tests() { - describe("Database tests", [ - before_all(fn() { - // Start database once for all tests - succeed() - }), - before_each(fn() { - // Begin transaction before each test - succeed() - }), - 
it("creates a record", fn() { - [] - |> should() - |> be_empty() - |> or_fail_with("Placeholder test") - }), - it("queries records", fn() { - [] - |> should() - |> be_empty() - |> or_fail_with("Placeholder test") - }), - after_each(fn() { - // Rollback transaction after each test - succeed() - }), - after_all(fn() { - // Stop database after all tests - succeed() - }), - ]) -} - -pub fn main() { - to_test_suite("lifecycle_hooks", tests()) - |> run_suite() - |> report(io.print) -} diff --git a/examples/snippets/test/quick_start.gleam b/examples/snippets/test/quick_start.gleam deleted file mode 100644 index f4a6063..0000000 --- a/examples/snippets/test/quick_start.gleam +++ /dev/null @@ -1,34 +0,0 @@ -//// README: Quick Start example - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{exit_on_failure, run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io -import gleam/string - -pub fn tests() { - describe("String utilities", [ - it("trims whitespace", fn() { - " hello " - |> string.trim() - |> should() - |> equal("hello") - |> or_fail_with("Should remove surrounding whitespace") - }), - it("finds substrings", fn() { - "hello world" - |> string.contains("world") - |> should() - |> equal(True) - |> or_fail_with("Should find 'world' in string") - }), - ]) -} - -pub fn main() { - to_test_cases("quick_start", tests()) - |> run_all() - |> report(io.print) - |> exit_on_failure() -} diff --git a/examples/snippets/test/runner_config.gleam b/examples/snippets/test/runner_config.gleam deleted file mode 100644 index 7731538..0000000 --- a/examples/snippets/test/runner_config.gleam +++ /dev/null @@ -1,32 +0,0 @@ -//// README: Runner config - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{RunnerConfig, run_all_with_config} -import dream_test/unit.{describe, it, to_test_cases} -import 
gleam/io -import gleam/option.{None} - -pub fn tests() { - describe("Runner config demo", [ - it("runs with custom config", fn() { - 1 + 1 - |> should() - |> equal(2) - |> or_fail_with("Math works") - }), - ]) -} - -pub fn main() { - let config = - RunnerConfig( - max_concurrency: 8, - default_timeout_ms: 10_000, - test_filter: None, - ) - - let test_cases = to_test_cases("runner_config", tests()) - run_all_with_config(config, test_cases) - |> report(io.print) -} diff --git a/examples/snippets/test/sequential_execution.gleam b/examples/snippets/test/sequential_execution.gleam deleted file mode 100644 index a21d468..0000000 --- a/examples/snippets/test/sequential_execution.gleam +++ /dev/null @@ -1,40 +0,0 @@ -//// README: Sequential execution for shared resources - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{RunnerConfig, run_all_with_config} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io -import gleam/option.{None} - -pub fn tests() { - describe("Sequential tests", [ - it("first test", fn() { - // When tests share external resources, run them sequentially - 1 + 1 - |> should() - |> equal(2) - |> or_fail_with("Math works") - }), - it("second test", fn() { - 2 + 2 - |> should() - |> equal(4) - |> or_fail_with("Math still works") - }), - ]) -} - -pub fn main() { - // Sequential execution for tests with shared state - let config = - RunnerConfig( - max_concurrency: 1, - default_timeout_ms: 30_000, - test_filter: None, - ) - - to_test_cases("sequential_test", tests()) - |> run_all_with_config(config, _) - |> report(io.print) -} diff --git a/examples/snippets/test/snapshot_testing.gleam b/examples/snippets/test/snapshot_testing.gleam deleted file mode 100644 index 1a2cb0b..0000000 --- a/examples/snippets/test/snapshot_testing.gleam +++ /dev/null @@ -1,87 +0,0 @@ -// Snapshot testing example for dream_test -// -// Run: gleam test -// -// Demonstrates: -// - 
match_snapshot for string comparisons -// - match_snapshot_inspect for complex data -// - Clearing snapshots programmatically - -import dream_test/assertions/should.{ - equal, match_snapshot, match_snapshot_inspect, or_fail_with, should, -} -import dream_test/matchers/snapshot -import dream_test/reporter/bdd.{report} -import dream_test/runner.{exit_on_failure, run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/int -import gleam/io -import gleam/string - -// Example: A function that renders a user profile as HTML -fn render_profile(name: String, age: Int) -> String { - string.concat([ - "
\n", - "

", - name, - "

\n", - "

Age: ", - int.to_string(age), - "

\n", - "
", - ]) -} - -// Example: A function that builds a configuration record -pub type Config { - Config(host: String, port: Int, debug: Bool) -} - -fn build_config() -> Config { - Config(host: "localhost", port: 8080, debug: True) -} - -pub fn tests() { - describe("Snapshot Testing", [ - describe("match_snapshot", [ - it("renders user profile", fn() { - render_profile("Alice", 30) - |> should() - |> match_snapshot("./test/snapshots/user_profile.snap") - |> or_fail_with("Profile should match snapshot") - }), - ]), - describe("match_snapshot_inspect", [ - it("builds config correctly", fn() { - build_config() - |> should() - |> match_snapshot_inspect("./test/snapshots/config.snap") - |> or_fail_with("Config should match snapshot") - }), - ]), - describe("clearing snapshots", [ - it("can clear a single snapshot", fn() { - // Create a temporary snapshot - "temp content" - |> should() - |> match_snapshot("./test/snapshots/temp.snap") - |> or_fail_with("Should create temp snapshot") - - // Clear it - let result = snapshot.clear_snapshot("./test/snapshots/temp.snap") - - result - |> should() - |> equal(Ok(Nil)) - |> or_fail_with("Should successfully clear snapshot") - }), - ]), - ]) -} - -pub fn main() { - to_test_cases("snapshot_testing", tests()) - |> run_all() - |> report(io.print) - |> exit_on_failure() -} diff --git a/examples/snippets/test/snapshots/bdd_format_incremental_parts_with_test_indent.snap b/examples/snippets/test/snapshots/bdd_format_incremental_parts_with_test_indent.snap new file mode 100644 index 0000000..f154873 --- /dev/null +++ b/examples/snippets/test/snapshots/bdd_format_incremental_parts_with_test_indent.snap @@ -0,0 +1,2 @@ +Example Suite + ✓ passes diff --git a/examples/snippets/test/snapshots/bdd_format_incremental_with_test_indent.snap b/examples/snippets/test/snapshots/bdd_format_incremental_with_test_indent.snap new file mode 100644 index 0000000..f154873 --- /dev/null +++ 
b/examples/snippets/test/snapshots/bdd_format_incremental_with_test_indent.snap @@ -0,0 +1,2 @@ +Example Suite + ✓ passes diff --git a/examples/snippets/test/snapshots/bdd_format_report.snap b/examples/snippets/test/snapshots/bdd_format_report.snap new file mode 100644 index 0000000..2f81725 --- /dev/null +++ b/examples/snippets/test/snapshots/bdd_format_report.snap @@ -0,0 +1,4 @@ +Example Suite + ✓ passes + +Summary: 1 run, 0 failed, 1 passed in 0ms diff --git a/examples/snippets/test/snapshots/bdd_format_summary_only.snap b/examples/snippets/test/snapshots/bdd_format_summary_only.snap new file mode 100644 index 0000000..95798ea --- /dev/null +++ b/examples/snippets/test/snapshots/bdd_format_summary_only.snap @@ -0,0 +1 @@ +Summary: 1 run, 0 failed, 1 passed in 0ms diff --git a/examples/snippets/test/snapshots/bdd_report_file_output.snap b/examples/snippets/test/snapshots/bdd_report_file_output.snap new file mode 100644 index 0000000..2f81725 --- /dev/null +++ b/examples/snippets/test/snapshots/bdd_report_file_output.snap @@ -0,0 +1,4 @@ +Example Suite + ✓ passes + +Summary: 1 run, 0 failed, 1 passed in 0ms diff --git a/examples/snippets/test/snapshots/config.snap b/examples/snippets/test/snapshots/config.snap new file mode 100644 index 0000000..45f4aff --- /dev/null +++ b/examples/snippets/test/snapshots/config.snap @@ -0,0 +1 @@ +Config("localhost", 8080, True) \ No newline at end of file diff --git a/examples/snippets/test/snapshots/gherkin_format_report.snap b/examples/snippets/test/snapshots/gherkin_format_report.snap new file mode 100644 index 0000000..f78de26 --- /dev/null +++ b/examples/snippets/test/snapshots/gherkin_format_report.snap @@ -0,0 +1,5 @@ +Feature: Example Feature + Scenario: Scenario A ✓ + +Summary: 1 run, 0 failed, 1 passed in 0ms + diff --git a/examples/snippets/test/snapshots/json_format_pretty.snap b/examples/snippets/test/snapshots/json_format_pretty.snap new file mode 100644 index 0000000..548f2ba --- /dev/null +++ 
b/examples/snippets/test/snapshots/json_format_pretty.snap @@ -0,0 +1,37 @@ +{ + "version": "1.0", + "timestamp_ms":0, + "duration_ms": 0, + "system": { + "os": "unix", + "otp_version": "28", + "gleam_version": "0.67.1" + }, + "summary": { + "total": 1, + "passed": 1, + "failed": 0, + "skipped": 0, + "pending": 0, + "timed_out": 0, + "setup_failed": 0 + }, + "tests": [ + { + "name": "passes", + "full_name": [ + "Example Suite", + "passes" + ], + "status": "passed", + "duration_ms": 0, + "tags": [ + + ], + "kind": "unit", + "failures": [ + + ] + } + ] +} \ No newline at end of file diff --git a/examples/snippets/test/snapshots/progress_render_run_started.snap b/examples/snippets/test/snapshots/progress_render_run_started.snap new file mode 100644 index 0000000..246d673 --- /dev/null +++ b/examples/snippets/test/snapshots/progress_render_run_started.snap @@ -0,0 +1 @@ +0/10 [░░░░░░░░░░░░░░░░░░░] 0% \ No newline at end of file diff --git a/examples/snippets/test/snapshots/user_profile.snap b/examples/snippets/test/snapshots/user_profile.snap new file mode 100644 index 0000000..4bf99d6 --- /dev/null +++ b/examples/snippets/test/snapshots/user_profile.snap @@ -0,0 +1,4 @@ +
+

Alice

+

Age: 30

+
\ No newline at end of file diff --git a/examples/snippets/test/snippets/gherkin/discover_loading.gleam b/examples/snippets/test/snippets/gherkin/discover_loading.gleam new file mode 100644 index 0000000..90f4bc2 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/discover_loading.gleam @@ -0,0 +1,23 @@ +import dream_test/gherkin/discover +import dream_test/matchers.{contain, have_length, or_fail_with, should} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Gherkin discovery", [ + it("list_files returns matching .feature file paths", fn() { + discover.features("test/*.feature") + |> discover.list_files() + |> should + |> contain("test/cart.feature") + |> or_fail_with("expected list_files to include test/cart.feature") + }), + it("load returns parsed features (and collects errors)", fn() { + let result = discover.features("test/*.feature") |> discover.load() + + result.features + |> should + |> have_length(1) + |> or_fail_with("expected one parsed feature") + }), + ]) +} diff --git a/examples/snippets/test/snippets/gherkin/gherkin_discover.gleam b/examples/snippets/test/snippets/gherkin/gherkin_discover.gleam new file mode 100644 index 0000000..8ff6f4f --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/gherkin_discover.gleam @@ -0,0 +1,56 @@ +import dream_test/gherkin/discover +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/result + +fn step_server_running(context: StepContext) { + put(context.world, "server_running", True) + Ok(succeed()) +} + +fn step_empty_cart(context: StepContext) { + put(context.world, "cart", 0) + Ok(succeed()) +} + +fn step_add_items(context: StepContext) { + let current = get_or(context.world, "cart", 0) + let to_add = get_int(context.captures, 0) |> 
result.unwrap(0) + put(context.world, "cart", current + to_add) + Ok(succeed()) +} + +fn step_verify_count(context: StepContext) { + let expected = get_int(context.captures, 0) |> result.unwrap(0) + get_or(context.world, "cart", 0) + |> should + |> be_equal(expected) + |> or_fail_with("Cart count mismatch") +} + +pub fn tests() { + // Define step handlers + let steps = + steps.new() + |> step("the server is running", step_server_running) + |> step("the cart is empty", step_empty_cart) + |> step("I add {int} items", step_add_items) + |> step("the cart should have {int} items", step_verify_count) + + // Discover and load all .feature files + discover.features("test/*.feature") + |> discover.with_registry(steps) + |> discover.to_suite("cart_features") +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/gherkin/gherkin_feature.gleam b/examples/snippets/test/snippets/gherkin/gherkin_feature.gleam new file mode 100644 index 0000000..1542718 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/gherkin_feature.gleam @@ -0,0 +1,72 @@ +import dream_test/gherkin/feature.{ + and, background, feature_with_background, given, scenario, then, when, + with_tags, +} +import dream_test/gherkin/steps.{type StepContext, get_int, step} +import dream_test/gherkin/world.{get_or, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import gleam/int +import gleam/result + +fn step_server_running(context: StepContext) { + put(context.world, "server_running", True) + Ok(succeed()) +} + +fn step_empty_cart(context: StepContext) { + put(context.world, "cart", 0) + Ok(succeed()) +} + +fn step_add_items(context: StepContext) { + let current = get_or(context.world, "cart", 0) + let to_add = 
get_int(context.captures, 0) |> result.unwrap(0) + put(context.world, "cart", current + to_add) + Ok(succeed()) +} + +fn step_verify_count(context: StepContext) { + let expected = get_int(context.captures, 0) |> result.unwrap(0) + let actual = get_or(context.world, "cart", 0) + actual + |> should + |> be_equal(expected) + |> or_fail_with("Expected " <> int.to_string(expected) <> " items") +} + +pub fn tests() { + let steps = + steps.new() + |> step("the server is running", step_server_running) + |> step("the cart is empty", step_empty_cart) + |> step("I add {int} items", step_add_items) + |> step("the cart should have {int} items", step_verify_count) + + let bg = background([given("the server is running")]) + + feature_with_background("Shopping Cart", steps, bg, [ + scenario("Adding items", [ + given("the cart is empty"), + when("I add 3 items"), + then("the cart should have 3 items"), + ]) + |> with_tags(["smoke"]), + scenario("Adding more items", [ + given("the cart is empty"), + when("I add 2 items"), + and("I add 3 items"), + then("the cart should have 5 items"), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/gherkin_file.gleam b/examples/snippets/test/snippets/gherkin/gherkin_file.gleam similarity index 51% rename from examples/snippets/test/gherkin_file.gleam rename to examples/snippets/test/snippets/gherkin/gherkin_file.gleam index 3b14dab..7a34dd9 100644 --- a/examples/snippets/test/gherkin_file.gleam +++ b/examples/snippets/test/snippets/gherkin/gherkin_file.gleam @@ -1,41 +1,44 @@ -//// README: Gherkin .feature file example - -import dream_test/assertions/should.{equal, or_fail_with, should} import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} import dream_test/gherkin/parser -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} 
+import dream_test/gherkin/steps.{type StepContext, get_int, step} import dream_test/gherkin/world.{get_or, put} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/types.{type AssertionResult, AssertionOk} -import gleam/io +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import gleam/result -fn step_empty_cart(context: StepContext) -> AssertionResult { +fn step_server_running(context: StepContext) { + put(context.world, "server_running", True) + Ok(succeed()) +} + +fn step_empty_cart(context: StepContext) { put(context.world, "cart", 0) - AssertionOk + Ok(succeed()) } -fn step_add_items(context: StepContext) -> AssertionResult { +fn step_add_items(context: StepContext) { let current = get_or(context.world, "cart", 0) let to_add = get_int(context.captures, 0) |> result.unwrap(0) put(context.world, "cart", current + to_add) - AssertionOk + Ok(succeed()) } -fn step_verify_count(context: StepContext) -> AssertionResult { +fn step_verify_count(context: StepContext) { let expected = get_int(context.captures, 0) |> result.unwrap(0) get_or(context.world, "cart", 0) - |> should() - |> equal(expected) + |> should + |> be_equal(expected) |> or_fail_with("Cart count mismatch") } pub fn tests() { // Define step handlers let steps = - new_registry() - |> step("I have an empty cart", step_empty_cart) + steps.new() + |> step("the server is running", step_server_running) + |> step("the cart is empty", step_empty_cart) |> step("I add {int} items", step_add_items) |> step("the cart should have {int} items", step_verify_count) @@ -44,11 +47,13 @@ pub fn tests() { // Convert to TestSuite and run let config = FeatureConfig(feature: feature, step_registry: steps) - to_test_suite("cart_test", config) + to_test_suite(config) } pub fn main() { - tests() - |> run_suite() - |> report(io.print) + runner.new([tests()]) + |> 
runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/gherkin_hero.gleam b/examples/snippets/test/snippets/gherkin/gherkin_hero.gleam similarity index 55% rename from examples/snippets/test/gherkin_hero.gleam rename to examples/snippets/test/snippets/gherkin/gherkin_hero.gleam index 6a20192..47d6a76 100644 --- a/examples/snippets/test/gherkin_hero.gleam +++ b/examples/snippets/test/snippets/gherkin/gherkin_hero.gleam @@ -1,39 +1,36 @@ -//// README: Gherkin hero example (featured at top) - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/gherkin/feature.{feature, given, scenario, then, when} -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} +import dream_test/gherkin/feature.{but, feature, given, scenario, then, when} +import dream_test/gherkin/steps.{type StepContext, get_int, step} import dream_test/gherkin/world.{get_or, put} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/types.{type AssertionResult, AssertionOk} -import gleam/io +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import gleam/result -fn step_have_items(context: StepContext) -> AssertionResult { +fn step_have_items(context: StepContext) { let count = get_int(context.captures, 0) |> result.unwrap(0) put(context.world, "cart", count) - AssertionOk + Ok(succeed()) } -fn step_add_items(context: StepContext) -> AssertionResult { +fn step_add_items(context: StepContext) { let current = get_or(context.world, "cart", 0) let to_add = get_int(context.captures, 0) |> result.unwrap(0) put(context.world, "cart", current + to_add) - AssertionOk + Ok(succeed()) } -fn step_should_have(context: StepContext) -> AssertionResult { +fn step_should_have(context: StepContext) 
{ let expected = get_int(context.captures, 0) |> result.unwrap(0) get_or(context.world, "cart", 0) - |> should() - |> equal(expected) + |> should + |> be_equal(expected) |> or_fail_with("Cart count mismatch") } pub fn tests() { let steps = - new_registry() + steps.new() |> step("I have {int} items in my cart", step_have_items) |> step("I add {int} more items", step_add_items) |> step("I should have {int} items total", step_should_have) @@ -43,12 +40,15 @@ pub fn tests() { given("I have 3 items in my cart"), when("I add 2 more items"), then("I should have 5 items total"), + but("I should have 5 items total"), ]), ]) } pub fn main() { - tests() - |> run_suite() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/gherkin_placeholders.gleam b/examples/snippets/test/snippets/gherkin/gherkin_placeholders.gleam similarity index 64% rename from examples/snippets/test/gherkin_placeholders.gleam rename to examples/snippets/test/snippets/gherkin/gherkin_placeholders.gleam index d952ace..9de7b6d 100644 --- a/examples/snippets/test/gherkin_placeholders.gleam +++ b/examples/snippets/test/snippets/gherkin/gherkin_placeholders.gleam @@ -1,52 +1,49 @@ -//// README: Gherkin placeholder types example - -import dream_test/assertions/should.{succeed} import dream_test/gherkin/feature.{feature, given, scenario, then} import dream_test/gherkin/steps.{ - type StepContext, get_float, get_int, get_string, get_word, new_registry, step, + type StepContext, get_float, get_int, get_string, get_word, step, } import dream_test/gherkin/world.{put} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/types.{type AssertionResult} -import gleam/io +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import 
gleam/result // {int} captures integers -fn step_int(context: StepContext) -> AssertionResult { +fn step_int(context: StepContext) { let value = get_int(context.captures, 0) |> result.unwrap(0) put(context.world, "int", value) - succeed() + Ok(succeed()) } // {float} captures decimals (works with $ prefix too) -fn step_float(context: StepContext) -> AssertionResult { +fn step_float(context: StepContext) { let value = get_float(context.captures, 0) |> result.unwrap(0.0) put(context.world, "float", value) - succeed() + Ok(succeed()) } // {string} captures quoted strings -fn step_string(context: StepContext) -> AssertionResult { +fn step_string(context: StepContext) { let value = get_string(context.captures, 0) |> result.unwrap("") put(context.world, "string", value) - succeed() + Ok(succeed()) } // {word} captures a single unquoted word -fn step_word(context: StepContext) -> AssertionResult { +fn step_word(context: StepContext) { let value = get_word(context.captures, 0) |> result.unwrap("") put(context.world, "word", value) - succeed() + Ok(succeed()) } -fn step_pass(_context: StepContext) -> AssertionResult { - succeed() +fn step_pass(_context) { + Ok(succeed()) } pub fn tests() { let steps = - new_registry() + steps.new() |> step("I have {int} items", step_int) |> step("the price is ${float}", step_float) |> step("the message is {string}", step_string) @@ -65,7 +62,9 @@ pub fn tests() { } pub fn main() { - tests() - |> run_suite() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/gherkin_step_handler.gleam b/examples/snippets/test/snippets/gherkin/gherkin_step_handler.gleam similarity index 56% rename from examples/snippets/test/gherkin_step_handler.gleam rename to examples/snippets/test/snippets/gherkin/gherkin_step_handler.gleam index a89418e..2711aae 100644 --- 
a/examples/snippets/test/gherkin_step_handler.gleam +++ b/examples/snippets/test/snippets/gherkin/gherkin_step_handler.gleam @@ -1,41 +1,36 @@ -//// README: Gherkin step handler example - -import dream_test/assertions/should.{equal, or_fail_with, should, succeed} import dream_test/gherkin/feature.{feature, given, scenario, then, when} -import dream_test/gherkin/steps.{ - type StepContext, type StepRegistry, get_float, new_registry, step, -} +import dream_test/gherkin/steps.{type StepContext, get_float, step} import dream_test/gherkin/world.{get_or, put} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_suite} -import dream_test/types.{type AssertionResult} -import gleam/io +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import gleam/result -// Each step handler receives a StepContext -fn step_have_balance(context: StepContext) -> AssertionResult { +// NOTE: We annotate `StepContext` because record field access needs a known type. +fn step_have_balance(context: StepContext) { // {float} captures the numeric value (even with $ prefix) let balance = get_float(context.captures, 0) |> result.unwrap(0.0) put(context.world, "balance", balance) - succeed() + Ok(succeed()) } -fn step_withdraw(context: StepContext) -> AssertionResult { +fn step_withdraw(context: StepContext) { let current = get_or(context.world, "balance", 0.0) let amount = get_float(context.captures, 0) |> result.unwrap(0.0) put(context.world, "balance", current -. 
amount) - succeed() + Ok(succeed()) } -fn step_balance_is(context: StepContext) -> AssertionResult { +fn step_balance_is(context: StepContext) { let expected = get_float(context.captures, 0) |> result.unwrap(0.0) get_or(context.world, "balance", 0.0) - |> should() - |> equal(expected) + |> should + |> be_equal(expected) |> or_fail_with("Balance mismatch") } -pub fn register(registry: StepRegistry) -> StepRegistry { +pub fn register(registry) { registry |> step("I have a balance of ${float}", step_have_balance) |> step("I withdraw ${float}", step_withdraw) @@ -43,7 +38,7 @@ pub fn register(registry: StepRegistry) -> StepRegistry { } pub fn tests() { - let steps = new_registry() |> register() + let steps = steps.new() |> register() feature("Bank Account", steps, [ scenario("Withdrawal", [ @@ -55,7 +50,9 @@ pub fn tests() { } pub fn main() { - tests() - |> run_suite() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/snippets/gherkin/gherkin_steps_registry.gleam b/examples/snippets/test/snippets/gherkin/gherkin_steps_registry.gleam new file mode 100644 index 0000000..9f6a608 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/gherkin_steps_registry.gleam @@ -0,0 +1,54 @@ +import dream_test/gherkin/steps.{ + type StepContext, capture_count, find_step, get_int, given, then_, when_, +} +import dream_test/gherkin/types.{Given, Then, When} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/unit.{describe, it} +import gleam/result + +fn step_pass(_context: StepContext) { + Ok(succeed()) +} + +pub fn tests() { + describe("Gherkin steps registry", [ + it("can find a step and count captures", fn() { + let registry = steps.new() |> given("I have {int} items", step_pass) + + use matched <- result.try(find_step(registry, Given, "I have 3 items")) + + 
capture_count(matched.captures) + |> should + |> be_equal(1) + |> or_fail_with("expected exactly one capture") + }), + it("can extract an int capture", fn() { + let registry = steps.new() |> given("I have {int} items", step_pass) + + use matched <- result.try(find_step(registry, Given, "I have 3 items")) + use count <- result.try(get_int(matched.captures, 0)) + + count + |> should + |> be_equal(3) + |> or_fail_with("expected captured int to be 3") + }), + it("registers a When step", fn() { + let registry = steps.new() |> when_("I add {int} items", step_pass) + + use _matched <- result.try(find_step(registry, When, "I add 2 items")) + Ok(succeed()) + }), + it("registers a Then step", fn() { + let registry = + steps.new() |> then_("I should have {int} items", step_pass) + + use _matched <- result.try(find_step( + registry, + Then, + "I should have 5 items", + )) + Ok(succeed()) + }), + ]) +} diff --git a/examples/snippets/test/snippets/gherkin/gherkin_types.gleam b/examples/snippets/test/snippets/gherkin/gherkin_types.gleam new file mode 100644 index 0000000..e4eabbb --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/gherkin_types.gleam @@ -0,0 +1,122 @@ +import dream_test/gherkin/types.{ + And, Background, DataTable, DocString, ExamplesTable, Feature, Given, Scenario, + ScenarioOutline, Step, Then, empty_background, empty_examples, + keyword_from_string, keyword_to_string, resolve_keyword, +} +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/unit.{describe, it} +import gleam/option.{None, Some} + +pub fn tests() { + describe("Gherkin types", [ + it("keyword_to_string renders keywords", fn() { + keyword_to_string(Given) + |> should + |> be_equal("Given") + |> or_fail_with("expected Given") + }), + it("keyword_from_string parses known keywords", fn() { + keyword_from_string("Then") + |> should + |> be_equal(Some(Then)) + |> or_fail_with("expected Some(Then)") + }), + it("resolve_keyword turns And/But into the previous keyword", 
fn() { + resolve_keyword(And, Given) + |> should + |> be_equal(Given) + |> or_fail_with("expected And after Given to resolve to Given") + }), + it("Step can be constructed", fn() { + Step(keyword: Given, text: "I have 1 item", argument: None) + |> should + |> be_equal(Step(keyword: Given, text: "I have 1 item", argument: None)) + |> or_fail_with("expected Step to be constructible") + }), + it("DocString can be constructed", fn() { + DocString(content: "{\"name\":\"example\"}", content_type: Some("json")) + |> should + |> be_equal(DocString( + content: "{\"name\":\"example\"}", + content_type: Some("json"), + )) + |> or_fail_with("expected DocString to be constructible") + }), + it("DataTable can be constructed", fn() { + DataTable(rows: [["name", "email"], ["Alice", "alice@test.com"]]) + |> should + |> be_equal( + DataTable(rows: [["name", "email"], ["Alice", "alice@test.com"]]), + ) + |> or_fail_with("expected DataTable to be constructible") + }), + it("empty_examples returns a table with no headers/rows", fn() { + empty_examples() + |> should + |> be_equal(ExamplesTable(headers: [], rows: [])) + |> or_fail_with("expected empty examples table") + }), + it("empty_background returns a background with no steps", fn() { + empty_background() + |> should + |> be_equal(Background(steps: [])) + |> or_fail_with("expected empty background") + }), + it("ExamplesTable can be constructed", fn() { + ExamplesTable(headers: ["quantity"], rows: [["1"], ["5"]]) + |> should + |> be_equal(ExamplesTable(headers: ["quantity"], rows: [["1"], ["5"]])) + |> or_fail_with("expected ExamplesTable to be constructible") + }), + it("Scenario can be constructed", fn() { + let step = Step(keyword: Given, text: "I have 1 item", argument: None) + + Scenario(name: "Example scenario", tags: [], steps: [step]) + |> should + |> be_equal(Scenario(name: "Example scenario", tags: [], steps: [step])) + |> or_fail_with("expected Scenario to be constructible") + }), + it("Feature can be constructed", fn() { 
+ let step = Step(keyword: Given, text: "I have 1 item", argument: None) + let scenario = Scenario(name: "Example scenario", tags: [], steps: [step]) + + Feature( + name: "Example feature", + description: None, + tags: [], + background: None, + scenarios: [scenario], + ) + |> should + |> be_equal( + Feature( + name: "Example feature", + description: None, + tags: [], + background: None, + scenarios: [scenario], + ), + ) + |> or_fail_with("expected Feature to be constructible") + }), + it("ScenarioOutline can be constructed", fn() { + let step = + Step(keyword: Given, text: "I have items", argument: None) + + ScenarioOutline( + name: "Example outline", + tags: [], + steps: [step], + examples: ExamplesTable(headers: ["count"], rows: [["1"], ["5"]]), + ) + |> should + |> be_equal(ScenarioOutline( + name: "Example outline", + tags: [], + steps: [step], + examples: ExamplesTable(headers: ["count"], rows: [["1"], ["5"]]), + )) + |> or_fail_with("expected ScenarioOutline to be constructible") + }), + ]) +} diff --git a/examples/snippets/test/snippets/gherkin/parser.gleam b/examples/snippets/test/snippets/gherkin/parser.gleam new file mode 100644 index 0000000..fb26328 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/parser.gleam @@ -0,0 +1,24 @@ +import dream_test/gherkin/parser +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/unit.{describe, it} +import gleam/result + +pub fn tests() { + describe("Gherkin parser", [ + it("parse_string parses a minimal feature", fn() { + let content = + "@smoke\n" + <> "Feature: Demo\n" + <> "\n" + <> " Scenario: One\n" + <> " Given a thing\n" + + use feature <- result.try(parser.parse_string(content)) + + feature.name + |> should + |> be_equal("Demo") + |> or_fail_with("expected feature name Demo") + }), + ]) +} diff --git a/examples/snippets/test/snippets/gherkin/step_trie.gleam b/examples/snippets/test/snippets/gherkin/step_trie.gleam new file mode 100644 index 0000000..f77bb74 --- /dev/null 
+++ b/examples/snippets/test/snippets/gherkin/step_trie.gleam @@ -0,0 +1,77 @@ +import dream_test/gherkin/step_trie +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/unit.{describe, it} +import gleam/option.{Some} + +pub fn tests() { + describe("Step trie", [ + it( + "parse_step_pattern splits prefix/suffix placeholders into segments", + fn() { + step_trie.parse_step_pattern("the total is ${float}USD") + |> should + |> be_equal([ + step_trie.LiteralWord("the"), + step_trie.LiteralWord("total"), + step_trie.LiteralWord("is"), + step_trie.LiteralWord("$"), + step_trie.FloatParam, + step_trie.LiteralWord("USD"), + ]) + |> or_fail_with( + "expected ${float}USD to split into literal + FloatParam segments", + ) + }, + ), + it( + "tokenize_step_text preserves quoted strings and splits numeric boundaries", + fn() { + step_trie.tokenize_step_text("I add \"Red Widget\" and pay $19.99USD") + |> should + |> be_equal([ + "I", + "add", + "\"Red Widget\"", + "and", + "pay", + "$", + "19.99", + "USD", + ]) + |> or_fail_with( + "expected tokenization to preserve quotes and split $19.99USD", + ) + }, + ), + it("lookup finds the most specific pattern and captures typed values", fn() { + let trie = + step_trie.new() + |> step_trie.insert( + keyword: "Given", + pattern: "I have an empty cart", + handler: "empty", + ) + |> step_trie.insert( + keyword: "Given", + pattern: "I have {int} items", + handler: "count", + ) + |> step_trie.insert( + keyword: "Then", + pattern: "the total is ${float}USD", + handler: "total_usd", + ) + + step_trie.lookup(trie, "Then", "the total is $19.99USD") + |> should + |> be_equal( + Some( + step_trie.StepMatch(handler: "total_usd", captures: [ + step_trie.CapturedFloat(19.99), + ]), + ), + ) + |> or_fail_with("expected float capture for $19.99USD") + }), + ]) +} diff --git a/examples/snippets/test/snippets/gherkin/world_get.gleam b/examples/snippets/test/snippets/gherkin/world_get.gleam new file mode 100644 index 
0000000..4f356cb --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/world_get.gleam @@ -0,0 +1,48 @@ +import dream_test/gherkin/feature.{feature, given, scenario, then} +import dream_test/gherkin/steps.{type StepContext, step} +import dream_test/gherkin/world.{get, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner + +fn step_store(context: StepContext) { + put(context.world, "count", 42) + Ok(succeed()) +} + +fn step_count_is_42(context: StepContext) { + case get(context.world, "count") { + Ok(count) -> + count + |> should + |> be_equal(42) + |> or_fail_with("count mismatch") + Error(message) -> Error(message) + } +} + +pub fn register(registry) { + registry + |> step("count is stored", step_store) + |> step("count should be 42", step_count_is_42) +} + +pub fn tests() { + let steps = steps.new() |> register() + + feature("World: get", steps, [ + scenario("Reading a stored value", [ + given("count is stored"), + then("count should be 42"), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/gherkin/world_has_delete.gleam b/examples/snippets/test/snippets/gherkin/world_has_delete.gleam new file mode 100644 index 0000000..d152888 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/world_has_delete.gleam @@ -0,0 +1,51 @@ +import dream_test/gherkin/feature.{feature, given, scenario, then, when} +import dream_test/gherkin/steps.{type StepContext, step} +import dream_test/gherkin/world.{delete, has, put} +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner + +fn step_store(context: StepContext) { + put(context.world, "temp", True) + 
Ok(succeed()) +} + +fn step_delete(context: StepContext) { + delete(context.world, "temp") + Ok(succeed()) +} + +fn step_is_absent(context: StepContext) { + has(context.world, "temp") + |> should + |> be_equal(False) + |> or_fail_with("expected temp to be absent") +} + +pub fn register(registry) { + registry + |> step("temp is stored", step_store) + |> step("temp is deleted", step_delete) + |> step("temp should be absent", step_is_absent) +} + +pub fn tests() { + let steps = steps.new() |> register() + + feature("World: has + delete", steps, [ + scenario("Deleting a key", [ + given("temp is stored"), + when("temp is deleted"), + then("temp should be absent"), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/gherkin/world_lifecycle.gleam b/examples/snippets/test/snippets/gherkin/world_lifecycle.gleam new file mode 100644 index 0000000..e7530b4 --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/world_lifecycle.gleam @@ -0,0 +1,7 @@ +import dream_test/gherkin/world + +pub fn main() { + // In normal gherkin runs, the runner creates and cleans up the World for you. 
+ let w = world.new_world("example_scenario") + world.cleanup(w) +} diff --git a/examples/snippets/test/snippets/gherkin/world_scenario_id.gleam b/examples/snippets/test/snippets/gherkin/world_scenario_id.gleam new file mode 100644 index 0000000..d39609f --- /dev/null +++ b/examples/snippets/test/snippets/gherkin/world_scenario_id.gleam @@ -0,0 +1,8 @@ +import dream_test/gherkin/world + +pub fn main() { + let w = world.new_world("example_scenario") + let id = world.scenario_id(w) + world.cleanup(w) + id +} diff --git a/examples/snippets/test/snippets/hooks/context_aware_grouping.gleam b/examples/snippets/test/snippets/hooks/context_aware_grouping.gleam new file mode 100644 index 0000000..3cecc5c --- /dev/null +++ b/examples/snippets/test/snippets/hooks/context_aware_grouping.gleam @@ -0,0 +1,47 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, group, it} + +pub type Context { + Context(counter: Int) +} + +fn increment_counter(context: Context) { + Ok(Context(counter: context.counter + 1)) +} + +pub fn suite() { + describe("Context-aware grouping", Context(counter: 0), [ + // This outer hook applies everywhere under this describe, including groups. + before_each(increment_counter), + + group("inner group", [ + // This hook only applies to tests inside this group. 
+ before_each(increment_counter), + + it("sees both outer + inner hooks", fn(context: Context) { + context.counter + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2 (outer + inner before_each)") + }), + ]), + + it("sees only outer hook", fn(context: Context) { + context.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 (outer before_each only)") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/context_aware_tests.gleam b/examples/snippets/test/snippets/hooks/context_aware_tests.gleam new file mode 100644 index 0000000..597df46 --- /dev/null +++ b/examples/snippets/test/snippets/hooks/context_aware_tests.gleam @@ -0,0 +1,41 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, it} + +pub type Context { + Context(counter: Int) +} + +fn increment_counter(context: Context) { + Ok(Context(counter: context.counter + 1)) +} + +pub fn suite() { + describe("Context-aware suite", Context(counter: 0), [ + before_each(increment_counter), + it("receives the updated context", fn(context: Context) { + context.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 after before_each") + }), + // Hook can be repeated; each applies to subsequent tests. 
+ before_each(increment_counter), + it("sees hook effects for subsequent tests", fn(context: Context) { + context.counter + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2 after two before_each hooks") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/hook_failure.gleam b/examples/snippets/test/snippets/hooks/hook_failure.gleam new file mode 100644 index 0000000..8e7286c --- /dev/null +++ b/examples/snippets/test/snippets/hooks/hook_failure.gleam @@ -0,0 +1,31 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{before_all, describe, it} + +fn connect_to_database() { + Ok(Nil) +} + +pub fn tests() { + describe("Handles failures", [ + before_all(fn() { + case connect_to_database() { + Ok(_) -> Ok(Nil) + Error(e) -> Error("Database connection failed: " <> e) + } + }), + // If before_all fails, these tests are marked SetupFailed (not run) + it("test1", fn() { Ok(succeed()) }), + it("test2", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/hook_inheritance.gleam b/examples/snippets/test/snippets/hooks/hook_inheritance.gleam new file mode 100644 index 0000000..83ff30e --- /dev/null +++ b/examples/snippets/test/snippets/hooks/hook_inheritance.gleam @@ -0,0 +1,41 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{after_each, before_each, describe, group, it} +import gleam/io + +pub fn tests() { + describe("Outer", [ + 
before_each(fn() { + io.println("1. outer setup") + Ok(Nil) + }), + after_each(fn() { + io.println("4. outer teardown") + Ok(Nil) + }), + group("Inner", [ + before_each(fn() { + io.println("2. inner setup") + Ok(Nil) + }), + after_each(fn() { + io.println("3. inner teardown") + Ok(Nil) + }), + it("test", fn() { + io.println("(test)") + Ok(succeed()) + }), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/lifecycle_hooks.gleam b/examples/snippets/test/snippets/hooks/lifecycle_hooks.gleam new file mode 100644 index 0000000..fe39e74 --- /dev/null +++ b/examples/snippets/test/snippets/hooks/lifecycle_hooks.gleam @@ -0,0 +1,64 @@ +import dream_test/matchers.{be_empty, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{ + after_all, after_each, before_all, before_each, describe, it, +} + +pub fn tests() { + describe("Database tests", [ + before_all(fn() { + // Start database once for all tests + start_database() + }), + before_each(fn() { + // Begin transaction before each test + begin_transaction() + }), + it("creates a record", fn() { + [] + |> should + |> be_empty() + |> or_fail_with("Placeholder test") + }), + it("queries records", fn() { + [] + |> should + |> be_empty() + |> or_fail_with("Placeholder test") + }), + after_each(fn() { + // Rollback transaction after each test + rollback_transaction() + }), + after_all(fn() { + // Stop database after all tests + stop_database() + }), + ]) +} + +fn start_database() { + Ok(Nil) +} + +fn stop_database() { + Ok(Nil) +} + +fn begin_transaction() { + Ok(Nil) +} + +fn rollback_transaction() { + Ok(Nil) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> 
runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/unit_context_grouping.gleam b/examples/snippets/test/snippets/hooks/unit_context_grouping.gleam new file mode 100644 index 0000000..86ce897 --- /dev/null +++ b/examples/snippets/test/snippets/hooks/unit_context_grouping.gleam @@ -0,0 +1,44 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, group, it} + +pub type Context { + Context(counter: Int) +} + +fn increment_counter(context: Context) { + Ok(Context(counter: context.counter + 1)) +} + +pub fn suite() { + describe("Context-aware grouping", Context(counter: 0), [ + // This outer hook applies everywhere under this describe, including groups. + before_each(increment_counter), + group("inner group", [ + // This hook only applies to tests inside this group. + before_each(increment_counter), + it("sees both outer + inner hooks", fn(context: Context) { + context.counter + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2 (outer + inner before_each)") + }), + ]), + it("sees only outer hook", fn(context: Context) { + context.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 (outer before_each only)") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/unit_context_lifecycle_hooks.gleam b/examples/snippets/test/snippets/hooks/unit_context_lifecycle_hooks.gleam new file mode 100644 index 0000000..a3d3f2f --- /dev/null +++ b/examples/snippets/test/snippets/hooks/unit_context_lifecycle_hooks.gleam @@ -0,0 +1,55 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import 
dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{ + after_all, after_each, before_all, before_each, describe, it, +} +import gleam/int +import gleam/io + +pub type Context { + Context(counter: Int) +} + +fn setup_suite(_context: Context) { + Ok(Context(counter: 0)) +} + +fn setup_test(context: Context) { + Ok(Context(counter: context.counter + 1)) +} + +// Teardown hooks commonly do side effects (closing files, deleting temp dirs, +// logging, etc). The hook returns `Ok(Nil)` because teardown doesn’t produce +// a new context value. +fn teardown_test(context: Context) { + Ok(io.println("after_each: counter=" <> int.to_string(context.counter))) +} + +fn teardown_suite(_context: Context) { + Ok(io.println("after_all: suite finished")) +} + +pub fn suite() { + describe("Context lifecycle hooks", Context(counter: 999), [ + before_all(setup_suite), + before_each(setup_test), + after_each(teardown_test), + after_all(teardown_suite), + it("runs with the transformed context", fn(context: Context) { + context.counter + |> should + |> be_equal(1) + |> or_fail_with("counter mismatch") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/unit_context_quick_start.gleam b/examples/snippets/test/snippets/hooks/unit_context_quick_start.gleam new file mode 100644 index 0000000..a4d80aa --- /dev/null +++ b/examples/snippets/test/snippets/hooks/unit_context_quick_start.gleam @@ -0,0 +1,33 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{before_each, describe, it} + +pub type Context { + Context(counter: Int) +} + +fn increment_counter(context: Context) { + Ok(Context(counter: context.counter + 1)) +} + +pub fn 
suite() { + describe("Context-aware suite", Context(counter: 0), [ + before_each(increment_counter), + it("receives the updated context", fn(context: Context) { + context.counter + |> should + |> be_equal(1) + |> or_fail_with("expected counter to be 1 after before_each") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/unit_context_skipping.gleam b/examples/snippets/test/snippets/hooks/unit_context_skipping.gleam new file mode 100644 index 0000000..f2b042c --- /dev/null +++ b/examples/snippets/test/snippets/hooks/unit_context_skipping.gleam @@ -0,0 +1,32 @@ +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{describe, it, skip} + +pub type Context { + Context(counter: Int) +} + +pub fn suite() { + describe("Skipping context-aware tests", Context(counter: 0), [ + skip("this test is skipped", fn(_context: Context) { + // This would pass if it ran, but Dream Test will mark it skipped. 
+ Ok(succeed()) + }), + it("normal tests still run", fn(context: Context) { + context.counter + |> should + |> be_equal(0) + |> or_fail_with("expected counter to start at 0") + }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/hooks/unit_context_tagging.gleam b/examples/snippets/test/snippets/hooks/unit_context_tagging.gleam new file mode 100644 index 0000000..08971e1 --- /dev/null +++ b/examples/snippets/test/snippets/hooks/unit_context_tagging.gleam @@ -0,0 +1,29 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit_context.{describe, group, it, with_tags} + +pub type Context { + Context(counter: Int) +} + +pub fn suite() { + describe("Tagging context-aware tests", Context(counter: 0), [ + group("group tagged slow", [ + it("inherits the group tag", fn(_context: Context) { Ok(succeed()) }), + ]) + |> with_tags(["slow"]), + it("can tag an individual test", fn(_context: Context) { Ok(succeed()) }) + |> with_tags(["unit_context", "fast"]), + it("untagged tests still work", fn(_context: Context) { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([suite()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/matchers/booleans.gleam b/examples/snippets/test/snippets/matchers/booleans.gleam new file mode 100644 index 0000000..4b4c948 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/booleans.gleam @@ -0,0 +1,19 @@ +import dream_test/matchers.{be_false, be_true, or_fail_with, should} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: booleans", [ + it("be_true passes for True", fn() { + True + |> 
should + |> be_true() + |> or_fail_with("expected True") + }), + it("be_false passes for False", fn() { + False + |> should + |> be_false() + |> or_fail_with("expected False") + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/builtin_matchers.gleam b/examples/snippets/test/snippets/matchers/builtin_matchers.gleam new file mode 100644 index 0000000..e59e233 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/builtin_matchers.gleam @@ -0,0 +1,67 @@ +import dream_test/matchers.{ + be_between, be_equal, be_false, be_ok, be_some, be_true, contain, + contain_string, have_length, match_regex, or_fail_with, should, +} +import dream_test/unit.{describe, it} +import gleam/option.{Some} + +pub fn tests() { + describe("Built-in matchers", [ + it("boolean: be_true", fn() { + True + |> should + |> be_true() + |> or_fail_with("expected True") + }), + it("boolean: be_false", fn() { + False + |> should + |> be_false() + |> or_fail_with("expected False") + }), + it("option: be_some + equal", fn() { + Some(42) + |> should + |> be_some() + |> be_equal(42) + |> or_fail_with("expected Some(42)") + }), + it("result: be_ok + equal", fn() { + Ok("hello") + |> should + |> be_ok() + |> be_equal("hello") + |> or_fail_with("expected Ok(\"hello\")") + }), + it("collection: have_length", fn() { + [1, 2, 3] + |> should + |> have_length(3) + |> or_fail_with("expected list length 3") + }), + it("collection: contain", fn() { + [1, 2, 3] + |> should + |> contain(2) + |> or_fail_with("expected list to contain 2") + }), + it("comparison: be_between", fn() { + 5 + |> should + |> be_between(1, 10) + |> or_fail_with("expected 5 to be between 1 and 10") + }), + it("string: contain_string", fn() { + "hello world" + |> should + |> contain_string("world") + |> or_fail_with("expected substring match") + }), + it("string: match_regex", fn() { + "user-123" + |> should + |> match_regex("^user-\\d+$") + |> or_fail_with("expected an id like user-123") + }), + ]) +} diff --git 
a/examples/snippets/test/snippets/matchers/collections.gleam b/examples/snippets/test/snippets/matchers/collections.gleam new file mode 100644 index 0000000..1541b7b --- /dev/null +++ b/examples/snippets/test/snippets/matchers/collections.gleam @@ -0,0 +1,33 @@ +import dream_test/matchers.{ + be_empty, contain, have_length, not_contain, or_fail_with, should, +} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: collections", [ + it("have_length checks the length of a list", fn() { + [1, 2, 3] + |> should + |> have_length(3) + |> or_fail_with("expected list length 3") + }), + it("contain passes when the item is present", fn() { + [1, 2, 3] + |> should + |> contain(2) + |> or_fail_with("expected list to contain 2") + }), + it("not_contain passes when the item is absent", fn() { + ["a", "b", "c"] + |> should + |> not_contain("d") + |> or_fail_with("expected list to not contain \"d\"") + }), + it("be_empty passes for an empty list", fn() { + [] + |> should + |> be_empty() + |> or_fail_with("expected empty list") + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/comparison.gleam b/examples/snippets/test/snippets/matchers/comparison.gleam new file mode 100644 index 0000000..c115d87 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/comparison.gleam @@ -0,0 +1,58 @@ +import dream_test/matchers.{ + be_at_least, be_at_most, be_between, be_greater_than, be_greater_than_float, + be_in_range, be_less_than, be_less_than_float, or_fail_with, should, +} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: comparison", [ + it("be_greater_than checks an int is greater than a minimum", fn() { + 10 + |> should + |> be_greater_than(0) + |> or_fail_with("expected 10 to be greater than 0") + }), + it("be_less_than checks an int is less than a maximum", fn() { + 10 + |> should + |> be_less_than(100) + |> or_fail_with("expected 10 to be less than 100") + }), + it("be_at_least checks an int is >= a 
minimum", fn() { + 10 + |> should + |> be_at_least(10) + |> or_fail_with("expected 10 to be at least 10") + }), + it("be_at_most checks an int is <= a maximum", fn() { + 10 + |> should + |> be_at_most(10) + |> or_fail_with("expected 10 to be at most 10") + }), + it("be_between checks an int is between two bounds", fn() { + 5 + |> should + |> be_between(1, 10) + |> or_fail_with("expected 5 to be between 1 and 10") + }), + it("be_in_range checks an int is within an inclusive range", fn() { + 10 + |> should + |> be_in_range(0, 100) + |> or_fail_with("expected 10 to be in range 0..100") + }), + it("be_greater_than_float checks a float is greater than a minimum", fn() { + 0.5 + |> should + |> be_greater_than_float(0.0) + |> or_fail_with("expected 0.5 to be greater than 0.0") + }), + it("be_less_than_float checks a float is less than a maximum", fn() { + 0.5 + |> should + |> be_less_than_float(1.0) + |> or_fail_with("expected 0.5 to be less than 1.0") + }), + ]) +} diff --git a/examples/snippets/test/custom_matchers.gleam b/examples/snippets/test/snippets/matchers/custom_matchers.gleam similarity index 72% rename from examples/snippets/test/custom_matchers.gleam rename to examples/snippets/test/snippets/matchers/custom_matchers.gleam index 65d0883..75bf587 100644 --- a/examples/snippets/test/custom_matchers.gleam +++ b/examples/snippets/test/snippets/matchers/custom_matchers.gleam @@ -1,14 +1,12 @@ -//// README: Custom matchers - -import dream_test/assertions/should.{or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all} +import dream_test/matchers.{or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner import dream_test/types.{ - type MatchResult, AssertionFailure, CustomMatcherFailure, MatchFailed, MatchOk, + AssertionFailure, CustomMatcherFailure, MatchFailed, MatchOk, } -import dream_test/unit.{describe, it, to_test_cases} +import dream_test/unit.{describe, it} 
import gleam/int -import gleam/io import gleam/option.{Some} import gleam/string @@ -18,7 +16,7 @@ import gleam/string /// Most matchers keep the same type (a == b), but unwrapping matchers /// like `be_some` change the type. /// -pub fn be_even(result: MatchResult(Int)) -> MatchResult(Int) { +pub fn be_even(result) { case result { // If already failed, propagate the failure MatchFailed(failure) -> MatchFailed(failure) @@ -27,7 +25,7 @@ pub fn be_even(result: MatchResult(Int)) -> MatchResult(Int) { } } -fn check_even(value: Int) -> MatchResult(Int) { +fn check_even(value) { case value % 2 == 0 { True -> MatchOk(value) False -> @@ -44,14 +42,14 @@ fn check_even(value: Int) -> MatchResult(Int) { /// A custom matcher that checks if a string is a valid email. /// -pub fn be_valid_email(result: MatchResult(String)) -> MatchResult(String) { +pub fn be_valid_email(result) { case result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(value) -> check_email(value) } } -fn check_email(value: String) -> MatchResult(String) { +fn check_email(value) { let has_at = string.contains(value, "@") let has_dot = string.contains(value, ".") case has_at && has_dot { @@ -72,13 +70,13 @@ pub fn tests() { describe("Custom Matchers", [ it("be_even passes for even numbers", fn() { 4 - |> should() + |> should |> be_even() |> or_fail_with("4 should be even") }), it("be_valid_email passes for valid emails", fn() { "user@example.com" - |> should() + |> should |> be_valid_email() |> or_fail_with("Should be a valid email") }), @@ -86,7 +84,9 @@ pub fn tests() { } pub fn main() { - to_test_cases("custom_matchers", tests()) - |> run_all() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/snippets/matchers/equality.gleam b/examples/snippets/test/snippets/matchers/equality.gleam new file mode 100644 index 0000000..9e0e493 --- 
/dev/null +++ b/examples/snippets/test/snippets/matchers/equality.gleam @@ -0,0 +1,19 @@ +import dream_test/matchers.{be_equal, not_equal, or_fail_with, should} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: equality", [ + it("be_equal compares two values for equality", fn() { + 2 + 3 + |> should + |> be_equal(5) + |> or_fail_with("2 + 3 should equal 5") + }), + it("not_equal asserts two values are different", fn() { + 10 + 3 + |> should + |> not_equal(3) + |> or_fail_with("10 + 3 should not equal 3") + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/getting_started.gleam b/examples/snippets/test/snippets/matchers/getting_started.gleam new file mode 100644 index 0000000..2acf946 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/getting_started.gleam @@ -0,0 +1,19 @@ +import dream_test/matchers.{be_true, fail_with, or_fail_with, should, succeed} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: getting started", [ + it("starts a matcher chain with should", fn() { + True + |> should + |> be_true() + |> or_fail_with("expected True") + }), + it("use succeed/fail_with in conditional branches", fn() { + Ok(case 1 + 1 { + 2 -> succeed() + _ -> fail_with("expected 1 + 1 to be 2") + }) + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/options.gleam b/examples/snippets/test/snippets/matchers/options.gleam new file mode 100644 index 0000000..29773a7 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/options.gleam @@ -0,0 +1,21 @@ +import dream_test/matchers.{be_equal, be_none, be_some, or_fail_with, should} +import dream_test/unit.{describe, it} +import gleam/option.{None, Some} + +pub fn tests() { + describe("Matchers: options", [ + it("be_some unwraps Some(value) so you can keep matching", fn() { + Some(42) + |> should + |> be_some() + |> be_equal(42) + |> or_fail_with("expected Some(42)") + }), + it("be_none passes for None", fn() { + None + |> should + 
|> be_none() + |> or_fail_with("expected None") + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/results.gleam b/examples/snippets/test/snippets/matchers/results.gleam new file mode 100644 index 0000000..e0794ad --- /dev/null +++ b/examples/snippets/test/snippets/matchers/results.gleam @@ -0,0 +1,21 @@ +import dream_test/matchers.{be_equal, be_error, be_ok, or_fail_with, should} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: results", [ + it("be_ok unwraps Ok(value) so you can keep matching", fn() { + Ok("hello") + |> should + |> be_ok() + |> be_equal("hello") + |> or_fail_with("expected Ok(\"hello\")") + }), + it("be_error unwraps Error(value) so you can keep matching", fn() { + Error("nope") + |> should + |> be_error() + |> be_equal("nope") + |> or_fail_with("expected Error(\"nope\")") + }), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/snapshots.gleam b/examples/snippets/test/snippets/matchers/snapshots.gleam new file mode 100644 index 0000000..851c8e0 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/snapshots.gleam @@ -0,0 +1,67 @@ +import dream_test/file +import dream_test/matchers.{ + be_equal, clear_snapshot, clear_snapshots_in_directory, match_snapshot, + match_snapshot_inspect, or_fail_with, should, +} +import dream_test/unit.{describe, it} +import gleam/option.{Some} +import gleam/result + +pub fn tests() { + describe("Matchers: snapshots", [ + it("match_snapshot compares a string against a snapshot file", fn() { + let path = "./test/tmp/match_snapshot_example.snap" + "hello" + |> should + |> match_snapshot(path) + |> or_fail_with("expected snapshot match") + }), + it( + "match_snapshot_inspect snapshots any value by using string.inspect", + fn() { + let path = "./test/tmp/match_snapshot_inspect_example.snap" + Some(1) + |> should + |> match_snapshot_inspect(path) + |> or_fail_with("expected inspect snapshot match") + }, + ), + it( + "clear_snapshot deletes a snapshot file 
(so next run recreates it)", + fn() { + let path = "./test/tmp/clear_snapshot_example.snap" + + // Setup: create a snapshot file (no assertions during setup) + use _ <- result.try( + file.write(path, "hello") |> result.map_error(file.error_to_string), + ) + + clear_snapshot(path) + |> should + |> be_equal(Ok(Nil)) + |> or_fail_with("expected clear_snapshot to succeed") + }, + ), + it( + "clear_snapshots_in_directory deletes all .snap files in a directory", + fn() { + let directory = "./test/tmp/clear_snapshots_in_directory_example" + let a = directory <> "/a.snap" + let b = directory <> "/b.snap" + + // Setup: create two snapshot files (no assertions during setup) + use _ <- result.try( + file.write(a, "a") |> result.map_error(file.error_to_string), + ) + use _ <- result.try( + file.write(b, "b") |> result.map_error(file.error_to_string), + ) + + clear_snapshots_in_directory(directory) + |> should + |> be_equal(Ok(2)) + |> or_fail_with("expected two deleted snapshots") + }, + ), + ]) +} diff --git a/examples/snippets/test/snippets/matchers/strings.gleam b/examples/snippets/test/snippets/matchers/strings.gleam new file mode 100644 index 0000000..23c1587 --- /dev/null +++ b/examples/snippets/test/snippets/matchers/strings.gleam @@ -0,0 +1,27 @@ +import dream_test/matchers.{ + contain_string, end_with, or_fail_with, should, start_with, +} +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Matchers: strings", [ + it("start_with checks the start of a string", fn() { + "hello world" + |> should + |> start_with("hello") + |> or_fail_with("expected string to start with \"hello\"") + }), + it("end_with checks the end of a string", fn() { + "hello.gleam" + |> should + |> end_with(".gleam") + |> or_fail_with("expected .gleam suffix") + }), + it("contain_string checks a string contains a substring", fn() { + "hello world" + |> should + |> contain_string("world") + |> or_fail_with("expected substring match") + }), + ]) +} diff --git 
a/examples/snippets/test/snippets/reporters/bdd_formatting.gleam b/examples/snippets/test/snippets/reporters/bdd_formatting.gleam new file mode 100644 index 0000000..e16761f --- /dev/null +++ b/examples/snippets/test/snippets/reporters/bdd_formatting.gleam @@ -0,0 +1,31 @@ +import dream_test/matchers.{match_snapshot, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/types +import dream_test/unit.{describe, it} + +fn sample_results() -> List(types.TestResult) { + [ + types.TestResult( + name: "passes", + full_name: ["Example Suite", "passes"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.Unit, + ), + ] +} + +pub fn tests() { + describe("BDD formatting", [ + it("format returns a report string", fn() { + let report = bdd.format(sample_results()) + + report + |> should + |> match_snapshot("./test/snapshots/bdd_format_report.snap") + |> or_fail_with("expected formatted report snapshot match") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/bdd_incremental.gleam b/examples/snippets/test/snippets/reporters/bdd_incremental.gleam new file mode 100644 index 0000000..c1b77c3 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/bdd_incremental.gleam @@ -0,0 +1,64 @@ +import dream_test/matchers.{match_snapshot, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/types +import dream_test/unit.{describe, it} + +fn passing_result() -> types.TestResult { + types.TestResult( + name: "passes", + full_name: ["Example Suite", "passes"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.Unit, + ) +} + +fn incremental_parts_text(parts: bdd.FormatIncrementalPartsResult) -> String { + case parts { + bdd.FormatIncrementalPartsResult(headers: headers, test_line: test_line, ..) 
-> + headers <> test_line + } +} + +pub fn tests() { + describe("BDD incremental formatting", [ + it("format_incremental_with_test_indent returns a formatted line", fn() { + let result = passing_result() + let bdd.FormatIncrementalResult(text: text, new_path: _new_path) = + bdd.format_incremental_with_test_indent(result, [], 0) + + text + |> should + |> match_snapshot( + "./test/snapshots/bdd_format_incremental_with_test_indent.snap", + ) + |> or_fail_with("expected incremental output snapshot match") + }), + it( + "format_incremental_parts_with_test_indent returns headers + test_line", + fn() { + let result = passing_result() + let text = + bdd.format_incremental_parts_with_test_indent(result, [], 0) + |> incremental_parts_text + + text + |> should + |> match_snapshot( + "./test/snapshots/bdd_format_incremental_parts_with_test_indent.snap", + ) + |> or_fail_with("expected incremental parts snapshot match") + }, + ), + it("format_summary_only returns a summary line", fn() { + let summary = bdd.format_summary_only([passing_result()]) + + summary + |> should + |> match_snapshot("./test/snapshots/bdd_format_summary_only.snap") + |> or_fail_with("expected summary snapshot match") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/bdd_module.gleam b/examples/snippets/test/snippets/reporters/bdd_module.gleam new file mode 100644 index 0000000..05dd6a6 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/bdd_module.gleam @@ -0,0 +1,62 @@ +import dream_test/file +import dream_test/matchers.{be_equal, match_snapshot, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/types +import dream_test/unit.{describe, it} +import gleam/list +import gleam/result + +fn passing_result() -> types.TestResult { + types.TestResult( + name: "passes", + full_name: ["Example Suite", "passes"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.Unit, + ) +} + +fn write_bdd_report_to_file(text: String) { + case 
file.write("test/tmp/bdd_report.txt", text) { + Ok(_) -> Nil + Error(_) -> Nil + } +} + +fn first_result(results: List(a)) -> Result(a, String) { + case list.first(results) { + Ok(value) -> Ok(value) + Error(_) -> Error("expected at least one test result") + } +} + +pub fn tests() { + describe("BDD reporter module", [ + it("report writes output", fn() { + bdd.report([passing_result()], write_bdd_report_to_file) + + use text <- result.try( + file.read("test/tmp/bdd_report.txt") + |> result.map_error(file.error_to_string), + ) + + text + |> should + |> match_snapshot("./test/snapshots/bdd_report_file_output.snap") + |> or_fail_with("expected report output snapshot match") + }), + it("format_incremental returns the next describe path", fn() { + use first <- result.try(first_result([passing_result()])) + + let bdd.FormatIncrementalResult(text: _text, new_path: new_path) = + bdd.format_incremental(first, []) + + new_path + |> should + |> be_equal(["Example Suite"]) + |> or_fail_with("expected new_path to be the describe path") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/bdd_reporter.gleam b/examples/snippets/test/snippets/reporters/bdd_reporter.gleam new file mode 100644 index 0000000..629df20 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/bdd_reporter.gleam @@ -0,0 +1,20 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("BDD reporter", [ + it("passes", fn() { Ok(succeed()) }), + it("also passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/reporters/gherkin_formatting.gleam b/examples/snippets/test/snippets/reporters/gherkin_formatting.gleam new file mode 100644 
index 0000000..a25388d --- /dev/null +++ b/examples/snippets/test/snippets/reporters/gherkin_formatting.gleam @@ -0,0 +1,29 @@ +import dream_test/matchers.{match_snapshot, or_fail_with, should} +import dream_test/reporters/gherkin as gherkin_reporter +import dream_test/types +import dream_test/unit.{describe, it} + +fn sample_results() -> List(types.TestResult) { + [ + types.TestResult( + name: "Scenario A", + full_name: ["Example Feature", "Scenario A"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.GherkinScenario("example"), + ), + ] +} + +pub fn tests() { + describe("Gherkin formatting", [ + it("format returns a report string", fn() { + gherkin_reporter.format(sample_results()) + |> should + |> match_snapshot("./test/snapshots/gherkin_format_report.snap") + |> or_fail_with("expected formatted report snapshot match") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/gherkin_is_gherkin_result.gleam b/examples/snippets/test/snippets/reporters/gherkin_is_gherkin_result.gleam new file mode 100644 index 0000000..81d1240 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/gherkin_is_gherkin_result.gleam @@ -0,0 +1,45 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/gherkin as gherkin_reporter +import dream_test/types +import dream_test/unit.{describe, it} + +fn gherkin_result() -> types.TestResult { + types.TestResult( + name: "Scenario A", + full_name: ["Example Feature", "Scenario A"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.GherkinScenario("example"), + ) +} + +fn unit_result() -> types.TestResult { + types.TestResult( + name: "passes", + full_name: ["Example Suite", "passes"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.Unit, + ) +} + +pub fn tests() { + describe("Gherkin reporter: is_gherkin_result", [ + it("returns True for GherkinScenario results", fn() { + 
gherkin_reporter.is_gherkin_result(gherkin_result()) + |> should + |> be_equal(True) + |> or_fail_with("expected True for gherkin results") + }), + it("returns False for non-gherkin results", fn() { + gherkin_reporter.is_gherkin_result(unit_result()) + |> should + |> be_equal(False) + |> or_fail_with("expected False for unit results") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/gherkin_reporter.gleam b/examples/snippets/test/snippets/reporters/gherkin_reporter.gleam new file mode 100644 index 0000000..f5e3c86 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/gherkin_reporter.gleam @@ -0,0 +1,26 @@ +import dream_test/gherkin/feature.{feature, given, scenario, then} +import dream_test/gherkin/steps.{step} +import dream_test/matchers.{succeed} +import dream_test/reporters/gherkin as gherkin_reporter +import dream_test/runner +import gleam/io + +fn step_ok(_context) { + Ok(succeed()) +} + +pub fn tests() { + let steps = steps.new() |> step("everything is fine", step_ok) + + feature("Gherkin Reporting", steps, [ + scenario("A passing scenario", [ + given("everything is fine"), + then("everything is fine"), + ]), + ]) +} + +pub fn main() { + let results = runner.new([tests()]) |> runner.run() + gherkin_reporter.report(results, io.print) +} diff --git a/examples/snippets/test/snippets/reporters/json_api_format.gleam b/examples/snippets/test/snippets/reporters/json_api_format.gleam new file mode 100644 index 0000000..2ed175c --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_api_format.gleam @@ -0,0 +1,15 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/runner +import dream_test/unit.{describe, it} + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + let results = runner.new([example_suite()]) |> runner.run() + json.format(results) +} diff --git 
a/examples/snippets/test/snippets/reporters/json_api_report.gleam b/examples/snippets/test/snippets/reporters/json_api_report.gleam new file mode 100644 index 0000000..374d3a3 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_api_report.gleam @@ -0,0 +1,16 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/io + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + let results = runner.new([example_suite()]) |> runner.run() + results |> json.format |> io.print +} diff --git a/examples/snippets/test/snippets/reporters/json_api_report_pretty.gleam b/examples/snippets/test/snippets/reporters/json_api_report_pretty.gleam new file mode 100644 index 0000000..fd3cd6d --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_api_report_pretty.gleam @@ -0,0 +1,16 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/io + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + let results = runner.new([example_suite()]) |> runner.run() + results |> json.format_pretty |> io.print +} diff --git a/examples/snippets/test/snippets/reporters/json_format_pretty.gleam b/examples/snippets/test/snippets/reporters/json_format_pretty.gleam new file mode 100644 index 0000000..e14b2b2 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_format_pretty.gleam @@ -0,0 +1,15 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/runner +import dream_test/unit.{describe, it} + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + let results = runner.new([example_suite()]) |> runner.run() + json.format_pretty(results) +} 
diff --git a/examples/snippets/test/snippets/reporters/json_formatting.gleam b/examples/snippets/test/snippets/reporters/json_formatting.gleam new file mode 100644 index 0000000..dd888cc --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_formatting.gleam @@ -0,0 +1,56 @@ +import dream_test/matchers.{match_snapshot, or_fail_with, should, succeed} +import dream_test/reporters/json +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/result +import gleam/string + +fn example_suite() { + describe("Example Suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +fn normalize_timestamp_ms(json: String) -> String { + let #(before, after) = + string.split_once(json, "\"timestamp_ms\":") + |> result.unwrap(#("MISSING_TIMESTAMP_MS", "")) + + let #(_timestamp_digits, rest) = + string.split_once(after, ",") + |> result.unwrap(#("MISSING_TIMESTAMP_VALUE", after)) + + before <> "\"timestamp_ms\":0," <> rest +} + +fn normalize_duration_ms(json: String) -> String { + normalize_duration_ms_loop(json, "") +} + +fn normalize_duration_ms_loop(remaining: String, acc: String) -> String { + case string.split_once(remaining, "\"duration_ms\":") { + Ok(#(before, after)) -> { + let #(_duration_digits, rest) = + string.split_once(after, ",") + |> result.unwrap(#("MISSING_DURATION_VALUE", after)) + + normalize_duration_ms_loop(rest, acc <> before <> "\"duration_ms\": 0,") + } + Error(_) -> acc <> remaining + } +} + +pub fn tests() { + describe("JSON formatting", [ + it("format_pretty returns JSON containing tests", fn() { + let results = runner.new([example_suite()]) |> runner.run() + let text = json.format_pretty(results) + let normalized = normalize_timestamp_ms(text) |> normalize_duration_ms + + normalized + |> should + |> match_snapshot("./test/snapshots/json_format_pretty.snap") + |> or_fail_with("expected format_pretty snapshot match") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/json_reporter.gleam 
b/examples/snippets/test/snippets/reporters/json_reporter.gleam new file mode 100644 index 0000000..1a2be39 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/json_reporter.gleam @@ -0,0 +1,27 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/json +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("JSON Reporter", [ + it("outputs JSON format", fn() { + // The json.report function outputs machine-readable JSON + // while bdd.report outputs human-readable text + Ok(succeed()) + }), + it("includes test metadata", fn() { + // JSON output includes name, full_name, status, duration, tags + Ok(succeed()) + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([json.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/reporters/progress_handle_event.gleam b/examples/snippets/test/snippets/reporters/progress_handle_event.gleam new file mode 100644 index 0000000..3657eb7 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/progress_handle_event.gleam @@ -0,0 +1,38 @@ +import dream_test/file +import dream_test/matchers.{match_regex, or_fail_with, should} +import dream_test/reporters/progress +import dream_test/reporters/types as reporter_types +import dream_test/unit.{describe, it} +import gleam/option.{None, Some} +import gleam/result + +fn write_progress_line_to_file(text: String) { + file.write("test/tmp/progress_handle_event.txt", text) + |> result.unwrap(Nil) +} + +pub fn tests() { + describe("Progress reporter: handle_event", [ + it("writes an in-place line (including carriage return)", fn() { + let reporter = progress.new() + let maybe_progress_text = + progress.handle_event(reporter, reporter_types.RunStarted(total: 10)) + case maybe_progress_text { + Some(text) -> write_progress_line_to_file(text) + None -> Nil + } + + 
use text <- result.try( + file.read("test/tmp/progress_handle_event.txt") + |> result.map_error(file.error_to_string), + ) + + text + |> should + |> match_regex("\\r0/10 \\[[^\\]]+\\] 0%") + |> or_fail_with( + "expected progress output to include carriage-returned 0/10 0% line", + ) + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/progress_render.gleam b/examples/snippets/test/snippets/reporters/progress_render.gleam new file mode 100644 index 0000000..b704be4 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/progress_render.gleam @@ -0,0 +1,15 @@ +import dream_test/matchers.{match_snapshot, or_fail_with, should} +import dream_test/reporters/progress +import dream_test/reporters/types as reporter_types +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Progress reporter: render", [ + it("renders a fixed-width progress bar line", fn() { + progress.render(30, reporter_types.RunStarted(total: 10)) + |> should + |> match_snapshot("./test/snapshots/progress_render_run_started.snap") + |> or_fail_with("expected render output snapshot match") + }), + ]) +} diff --git a/examples/snippets/test/snippets/reporters/progress_reporter.gleam b/examples/snippets/test/snippets/reporters/progress_reporter.gleam new file mode 100644 index 0000000..1f8f154 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/progress_reporter.gleam @@ -0,0 +1,18 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Progress reporter", [ + it("passes", fn() { Ok(succeed()) }), + it("also passes", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/reporters/reporter_api_handle_event.gleam 
b/examples/snippets/test/snippets/reporters/reporter_api_handle_event.gleam new file mode 100644 index 0000000..9443b55 --- /dev/null +++ b/examples/snippets/test/snippets/reporters/reporter_api_handle_event.gleam @@ -0,0 +1,72 @@ +import dream_test/file +import dream_test/matchers.{match_regex, or_fail_with, should} +import dream_test/reporters/progress +import dream_test/reporters/types as reporter_types +import dream_test/types +import dream_test/unit.{describe, it} +import gleam/option.{type Option, None, Some} +import gleam/result + +fn write_to_file(text: String) { + file.write("test/tmp/reporter_api_handle_event.txt", text) + |> result.unwrap(Nil) +} + +fn or_empty(text: Option(String)) -> String { + case text { + Some(s) -> s + None -> "" + } +} + +pub fn tests() { + describe("Reporter API: handle_event", [ + it("can be driven manually with ReporterEvent values", fn() { + let reporter = progress.new() + + let result = + types.TestResult( + name: "passes", + full_name: ["suite", "passes"], + status: types.Passed, + duration_ms: 0, + tags: [], + failures: [], + kind: types.Unit, + ) + + let text = + or_empty(progress.handle_event( + reporter, + reporter_types.RunStarted(total: 1), + )) + <> or_empty(progress.handle_event( + reporter, + reporter_types.TestFinished(completed: 1, total: 1, result: result), + )) + <> or_empty(progress.handle_event( + reporter, + reporter_types.RunFinished(completed: 1, total: 1, results: [result]), + )) + + write_to_file(text) + + // Setup: read output (no assertions during setup) + use output <- result.try( + file.read("test/tmp/reporter_api_handle_event.txt") + |> result.map_error(file.error_to_string), + ) + + output + |> should + |> match_regex("\\r0/1 \\[[^\\]]+\\] 0%") + |> match_regex("\\r1/1 \\[[^\\]]+\\] 100%") + |> match_regex("suite") + |> match_regex("passes") + |> match_regex("\\r1/1 \\[[^\\]]+\\] 100% done") + |> or_fail_with( + "expected progress reporter output to include expected counters/labels", + ) + }), + ]) 
+} diff --git a/examples/snippets/test/snippets/reporters/reporter_events.gleam b/examples/snippets/test/snippets/reporters/reporter_events.gleam new file mode 100644 index 0000000..87243ce --- /dev/null +++ b/examples/snippets/test/snippets/reporters/reporter_events.gleam @@ -0,0 +1,77 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/types as reporter_types +import dream_test/unit.{describe, it} +import gleam/list +import gleam/option.{Some} +import gleam/result + +fn run_started_total(event: reporter_types.ReporterEvent) -> Result(Int, String) { + case event { + reporter_types.RunStarted(total: total) -> Ok(total) + _ -> Error("expected RunStarted") + } +} + +fn hook_error_message( + event: reporter_types.ReporterEvent, +) -> Result(String, String) { + case event { + reporter_types.HookFinished( + outcome: reporter_types.HookError(message: message), + .., + ) -> Ok(message) + _ -> Error("expected HookFinished with HookError") + } +} + +fn run_finished_results_count( + event: reporter_types.ReporterEvent, +) -> Result(Int, String) { + case event { + reporter_types.RunFinished(results: results, ..) 
-> Ok(list.length(results)) + _ -> Error("expected RunFinished") + } +} + +pub fn tests() { + describe("ReporterEvent", [ + it("RunStarted includes the total number of tests", fn() { + use total <- result.try( + run_started_total(reporter_types.RunStarted(total: 3)), + ) + + total + |> should + |> be_equal(3) + |> or_fail_with("expected total to be 3") + }), + it("HookFinished can include a HookError message", fn() { + let event = + reporter_types.HookFinished( + kind: reporter_types.AfterEach, + scope: ["file"], + test_name: Some("delete"), + outcome: reporter_types.HookError(message: "boom"), + ) + + use message <- result.try(hook_error_message(event)) + + message + |> should + |> be_equal("boom") + |> or_fail_with("expected hook error message 'boom'") + }), + it("RunFinished includes the traversal-ordered results list", fn() { + use count <- result.try( + run_finished_results_count( + reporter_types.RunFinished(completed: 1, total: 1, results: []), + ), + ) + + count + |> should + |> be_equal(0) + |> or_fail_with("expected results count to be 0") + }), + ]) +} diff --git a/examples/snippets/test/snippets/runner/discovery_list_modules.gleam b/examples/snippets/test/snippets/runner/discovery_list_modules.gleam new file mode 100644 index 0000000..2efdf6a --- /dev/null +++ b/examples/snippets/test/snippets/runner/discovery_list_modules.gleam @@ -0,0 +1,6 @@ +import dream_test/discover + +pub fn main() { + discover.tests("snippets/unit/**.gleam") + |> discover.list_modules() +} diff --git a/examples/snippets/test/snippets/runner/discovery_load.gleam b/examples/snippets/test/snippets/runner/discovery_load.gleam new file mode 100644 index 0000000..74d39f6 --- /dev/null +++ b/examples/snippets/test/snippets/runner/discovery_load.gleam @@ -0,0 +1,6 @@ +import dream_test/discover + +pub fn main() { + discover.tests("snippets/unit/**.gleam") + |> discover.load() +} diff --git a/examples/snippets/test/snippets/runner/discovery_runner.gleam 
b/examples/snippets/test/snippets/runner/discovery_runner.gleam new file mode 100644 index 0000000..9b88dd4 --- /dev/null +++ b/examples/snippets/test/snippets/runner/discovery_runner.gleam @@ -0,0 +1,19 @@ +import dream_test/discover.{from_path, to_suites} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner.{ + exit_on_failure, progress_reporter, results_reporters, run, +} + +pub fn main() { + let suites = + discover.new() + |> from_path("snippets/unit/**.gleam") + |> to_suites() + + runner.new(suites) + |> progress_reporter(progress.new()) + |> results_reporters([bdd.new()]) + |> exit_on_failure() + |> run() +} diff --git a/examples/snippets/test/snippets/runner/discovery_tests_builder.gleam b/examples/snippets/test/snippets/runner/discovery_tests_builder.gleam new file mode 100644 index 0000000..ba07275 --- /dev/null +++ b/examples/snippets/test/snippets/runner/discovery_tests_builder.gleam @@ -0,0 +1,18 @@ +import dream_test/discover +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner.{ + exit_on_failure, progress_reporter, results_reporters, run, +} + +pub fn main() { + let suites = + discover.tests("snippets/unit/**.gleam") + |> discover.to_suites() + + runner.new(suites) + |> progress_reporter(progress.new()) + |> results_reporters([bdd.new()]) + |> exit_on_failure() + |> run() +} diff --git a/examples/snippets/test/snippets/runner/discovery_to_suite.gleam b/examples/snippets/test/snippets/runner/discovery_to_suite.gleam new file mode 100644 index 0000000..8412069 --- /dev/null +++ b/examples/snippets/test/snippets/runner/discovery_to_suite.gleam @@ -0,0 +1,18 @@ +import dream_test/discover +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner.{ + exit_on_failure, progress_reporter, results_reporters, run, +} + +pub fn main() { + let suite = + discover.tests("snippets/unit/**.gleam") + |> discover.to_suite("discovered tests") + 
+ runner.new([suite]) + |> progress_reporter(progress.new()) + |> results_reporters([bdd.new()]) + |> exit_on_failure() + |> run() +} diff --git a/examples/snippets/test/snippets/runner/execution_modes.gleam b/examples/snippets/test/snippets/runner/execution_modes.gleam new file mode 100644 index 0000000..0bed29a --- /dev/null +++ b/examples/snippets/test/snippets/runner/execution_modes.gleam @@ -0,0 +1,24 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Execution modes demo", [ + it("runs as a suite", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("Math works") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/runner/filter_tests.gleam b/examples/snippets/test/snippets/runner/filter_tests.gleam new file mode 100644 index 0000000..cf99a85 --- /dev/null +++ b/examples/snippets/test/snippets/runner/filter_tests.gleam @@ -0,0 +1,33 @@ +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner.{type TestInfo} +import dream_test/unit.{describe, it, with_tags} +import gleam/list + +pub fn tests() { + describe("Filtering tests", [ + it("smoke", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("math should work") + }) + |> with_tags(["smoke"]), + it("slow", fn() { Ok(succeed()) }) + |> with_tags(["slow"]), + ]) +} + +pub fn only_smoke(info: TestInfo) -> Bool { + list.contains(info.tags, "smoke") +} + +pub fn main() { + runner.new([tests()]) + |> runner.filter_tests(only_smoke) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> 
runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/runner/has_failures.gleam b/examples/snippets/test/snippets/runner/has_failures.gleam new file mode 100644 index 0000000..e919d22 --- /dev/null +++ b/examples/snippets/test/snippets/runner/has_failures.gleam @@ -0,0 +1,30 @@ +import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("has_failures", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +fn failing_suite() { + describe("failing suite", [ + it("fails", fn() { + 1 + |> should + |> be_equal(2) + |> or_fail_with("intentional failure for has_failures example") + }), + ]) +} + +pub fn main() { + let results = runner.new([failing_suite()]) |> runner.run() + + results + |> runner.has_failures() + |> should + |> be_equal(True) + |> or_fail_with("expected failures to be present") +} diff --git a/examples/snippets/test/snippets/runner/minimal_test_runner.gleam b/examples/snippets/test/snippets/runner/minimal_test_runner.gleam new file mode 100644 index 0000000..7378855 --- /dev/null +++ b/examples/snippets/test/snippets/runner/minimal_test_runner.gleam @@ -0,0 +1,24 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Example", [ + it("works", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("math should work") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/runner/runner_config.gleam b/examples/snippets/test/snippets/runner/runner_config.gleam new file mode 100644 index 0000000..a2d5c0c --- /dev/null +++ 
b/examples/snippets/test/snippets/runner/runner_config.gleam @@ -0,0 +1,26 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Runner config demo", [ + it("runs with custom config", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("Math works") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.max_concurrency(8) + |> runner.default_timeout_ms(10_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/runner/sequential_execution.gleam b/examples/snippets/test/snippets/runner/sequential_execution.gleam new file mode 100644 index 0000000..46dcb6e --- /dev/null +++ b/examples/snippets/test/snippets/runner/sequential_execution.gleam @@ -0,0 +1,34 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Sequential tests", [ + it("first test", fn() { + // When tests share external resources, run them sequentially + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("Math works") + }), + it("second test", fn() { + 2 + 2 + |> should + |> be_equal(4) + |> or_fail_with("Math still works") + }), + ]) +} + +pub fn main() { + // Sequential execution for tests with shared state + runner.new([tests()]) + |> runner.max_concurrency(1) + |> runner.default_timeout_ms(30_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/runner/suite_specific_config.gleam b/examples/snippets/test/snippets/runner/suite_specific_config.gleam new file mode 
100644 index 0000000..43e6f94 --- /dev/null +++ b/examples/snippets/test/snippets/runner/suite_specific_config.gleam @@ -0,0 +1,33 @@ +import dream_test/matchers.{succeed} +import dream_test/parallel +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +fn unit_tests() { + describe("Unit suite", [ + it("is fast", fn() { Ok(succeed()) }), + ]) +} + +fn db_tests() { + describe("DB suite (sequential)", [ + it("pretend db test", fn() { Ok(succeed()) }), + ]) +} + +pub fn main() { + let db_config = + parallel.ParallelConfig(max_concurrency: 1, default_timeout_ms: 60_000) + + runner.new([]) + |> runner.add_suites([unit_tests()]) + |> runner.add_suites_with_config(db_config, [db_tests()]) + |> runner.max_concurrency(8) + |> runner.default_timeout_ms(10_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/unit/chaining.gleam b/examples/snippets/test/snippets/unit/chaining.gleam new file mode 100644 index 0000000..2c8039d --- /dev/null +++ b/examples/snippets/test/snippets/unit/chaining.gleam @@ -0,0 +1,35 @@ +import dream_test/matchers.{be_equal, be_ok, be_some, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/option.{Some} + +pub fn tests() { + describe("Chaining matchers", [ + // Unwrap Some, then check the value + it("unwraps Option", fn() { + Some(42) + |> should + |> be_some() + |> be_equal(42) + |> or_fail_with("Should contain 42") + }), + // Unwrap Ok, then check the value + it("unwraps Result", fn() { + Ok("success") + |> should + |> be_ok() + |> be_equal("success") + |> or_fail_with("Should be Ok with 'success'") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> 
runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/explicit_failures.gleam b/examples/snippets/test/snippets/unit/explicit_failures.gleam similarity index 53% rename from examples/snippets/test/explicit_failures.gleam rename to examples/snippets/test/snippets/unit/explicit_failures.gleam index 2f50c10..c41fce8 100644 --- a/examples/snippets/test/explicit_failures.gleam +++ b/examples/snippets/test/snippets/unit/explicit_failures.gleam @@ -1,33 +1,33 @@ -//// README: Explicit failures - -import dream_test/assertions/should.{fail_with, succeed} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io +import dream_test/matchers.{fail_with, succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} import snippets.{divide} pub fn tests() { describe("Explicit failures", [ it("succeeds explicitly when division works", fn() { let result = divide(10, 2) - case result { + Ok(case result { Ok(_) -> succeed() Error(_) -> fail_with("Should have succeeded") - } + }) }), it("fails explicitly when expecting an error", fn() { let result = divide(10, 0) - case result { + Ok(case result { Ok(_) -> fail_with("Should have returned an error") Error(_) -> succeed() - } + }) }), ]) } pub fn main() { - to_test_cases("explicit_failures", tests()) - |> run_all() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/snippets/unit/grouping.gleam b/examples/snippets/test/snippets/unit/grouping.gleam new file mode 100644 index 0000000..0d0846a --- /dev/null +++ b/examples/snippets/test/snippets/unit/grouping.gleam @@ -0,0 +1,40 @@ +import dream_test/matchers.{be_equal, or_fail_with, 
should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, group, it} + +pub fn tests() { + describe("Calculator", [ + group("addition", [ + it("adds small numbers", fn() { + 2 + 3 + |> should + |> be_equal(5) + |> or_fail_with("2 + 3 should equal 5") + }), + it("adds negative numbers", fn() { + -2 + -3 + |> should + |> be_equal(-5) + |> or_fail_with("-2 + -3 should equal -5") + }), + ]), + group("division", [ + it("integer division rounds toward zero", fn() { + 7 / 2 + |> should + |> be_equal(3) + |> or_fail_with("7 / 2 should equal 3") + }), + ]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/hero.gleam b/examples/snippets/test/snippets/unit/hero.gleam similarity index 51% rename from examples/snippets/test/hero.gleam rename to examples/snippets/test/snippets/unit/hero.gleam index 53aaf08..5cd185d 100644 --- a/examples/snippets/test/hero.gleam +++ b/examples/snippets/test/snippets/unit/hero.gleam @@ -1,32 +1,28 @@ -//// README: Hero example (Calculator) - -import dream_test/assertions/should.{ - be_error, be_ok, equal, or_fail_with, should, -} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all} -import dream_test/unit.{describe, it, to_test_cases} -import gleam/io +import dream_test/matchers.{be_equal, be_error, be_ok, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} import snippets.{add, divide} pub fn tests() { describe("Calculator", [ it("adds two numbers", fn() { add(2, 3) - |> should() - |> equal(5) + |> should + |> be_equal(5) |> or_fail_with("2 + 3 should equal 5") }), it("handles division", fn() { divide(10, 2) - |> should() + |> should |> be_ok() - |> equal(5) + |> 
be_equal(5) |> or_fail_with("10 / 2 should equal 5") }), it("returns error for division by zero", fn() { divide(1, 0) - |> should() + |> should |> be_error() |> or_fail_with("Division by zero should error") }), @@ -34,7 +30,9 @@ pub fn tests() { } pub fn main() { - to_test_cases("hero", tests()) - |> run_all() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/snippets/unit/quick_start.gleam b/examples/snippets/test/snippets/unit/quick_start.gleam new file mode 100644 index 0000000..a4c0efe --- /dev/null +++ b/examples/snippets/test/snippets/unit/quick_start.gleam @@ -0,0 +1,33 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} +import gleam/string + +pub fn tests() { + describe("String utilities", [ + it("trims whitespace", fn() { + " hello " + |> string.trim() + |> should + |> be_equal("hello") + |> or_fail_with("Should remove surrounding whitespace") + }), + it("finds substrings", fn() { + "hello world" + |> string.contains("world") + |> should + |> be_equal(True) + |> or_fail_with("Should find 'world' in string") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/skipping_tests.gleam b/examples/snippets/test/snippets/unit/skipping_tests.gleam similarity index 51% rename from examples/snippets/test/skipping_tests.gleam rename to examples/snippets/test/snippets/unit/skipping_tests.gleam index bc67fc3..36d8bb9 100644 --- a/examples/snippets/test/skipping_tests.gleam +++ b/examples/snippets/test/snippets/unit/skipping_tests.gleam @@ -1,38 +1,38 @@ -//// README: Skipping 
tests - -import dream_test/assertions/should.{equal, or_fail_with, should} -import dream_test/reporter/bdd.{report} -import dream_test/runner.{run_all} -import dream_test/unit.{describe, it, skip, to_test_cases} -import gleam/io +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it, skip} import snippets.{add} pub fn tests() { describe("Skipping tests", [ it("runs normally", fn() { add(2, 3) - |> should() - |> equal(5) + |> should + |> be_equal(5) |> or_fail_with("2 + 3 should equal 5") }), skip("not implemented yet", fn() { // This test is skipped - the body is preserved but not executed add(100, 200) - |> should() - |> equal(300) + |> should + |> be_equal(300) |> or_fail_with("Should add large numbers") }), it("also runs normally", fn() { add(0, 0) - |> should() - |> equal(0) + |> should + |> be_equal(0) |> or_fail_with("0 + 0 should equal 0") }), ]) } pub fn main() { - to_test_cases("skipping_tests", tests()) - |> run_all() - |> report(io.print) + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/examples/snippets/test/snippets/unit/tagging.gleam b/examples/snippets/test/snippets/unit/tagging.gleam new file mode 100644 index 0000000..878b1d3 --- /dev/null +++ b/examples/snippets/test/snippets/unit/tagging.gleam @@ -0,0 +1,22 @@ +import dream_test/matchers.{succeed} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it, with_tags} + +pub fn tests() { + describe("Tagged tests", [ + it("fast", fn() { Ok(succeed()) }) + |> with_tags(["unit", "fast"]), + it("slow", fn() { Ok(succeed()) }) + |> with_tags(["integration", "slow"]), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + 
|> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} diff --git a/examples/snippets/test/snippets/utils/context_helpers.gleam b/examples/snippets/test/snippets/utils/context_helpers.gleam new file mode 100644 index 0000000..a38b3de --- /dev/null +++ b/examples/snippets/test/snippets/utils/context_helpers.gleam @@ -0,0 +1,32 @@ +import dream_test/context +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/types.{AssertionFailure} +import dream_test/unit.{describe, it} +import gleam/option.{None} + +pub fn tests() { + describe("dream_test/context", [ + it("new has no failures", fn() { + context.new() + |> context.failures() + |> should + |> be_equal([]) + |> or_fail_with("expected new context to have no failures") + }), + + it("add_failure stores failures newest-first", fn() { + let first_failure = + AssertionFailure(operator: "op1", message: "m1", payload: None) + let second_failure = + AssertionFailure(operator: "op2", message: "m2", payload: None) + + context.new() + |> context.add_failure(first_failure) + |> context.add_failure(second_failure) + |> context.failures() + |> should + |> be_equal([second_failure, first_failure]) + |> or_fail_with("expected newest-first failure ordering") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/file_helpers.gleam b/examples/snippets/test/snippets/utils/file_helpers.gleam new file mode 100644 index 0000000..704e215 --- /dev/null +++ b/examples/snippets/test/snippets/utils/file_helpers.gleam @@ -0,0 +1,71 @@ +import dream_test/file.{ + NotFound, delete, delete_files_matching, error_to_string, read, write, +} +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/process.{unique_port} +import dream_test/unit.{describe, it} +import gleam/int +import gleam/result + +fn tmp_path() { + "./test/tmp/file_helpers_" <> int.to_string(unique_port()) <> ".txt" +} + +pub fn tests() { + describe("File helpers", [ + it("write + read 
roundtrip", fn() { + let path = tmp_path() + + // Setup: create the file (no assertions during setup) + use _ <- result.try( + write(path, "hello") |> result.map_error(error_to_string), + ) + + read(path) + |> should + |> be_equal(Ok("hello")) + |> or_fail_with("expected to read back written content") + }), + + it("delete removes a file", fn() { + let path = tmp_path() + + // Setup: create the file, then delete it (no assertions during setup) + use _ <- result.try( + write(path, "hello") |> result.map_error(error_to_string), + ) + use _ <- result.try(delete(path) |> result.map_error(error_to_string)) + + read(path) + |> should + |> be_equal(Error(NotFound(path))) + |> or_fail_with("expected deleted file to be NotFound") + }), + + it("error_to_string formats NotFound", fn() { + error_to_string(NotFound("/x")) + |> should + |> be_equal("File not found: /x") + |> or_fail_with("expected NotFound formatting") + }), + + it("delete_files_matching deletes files with a given extension", fn() { + let directory = "./test/tmp/file_helpers_" <> int.to_string(unique_port()) + let a = directory <> "/a.snap" + let b = directory <> "/b.snap" + let keep = directory <> "/keep.txt" + + // Setup: create 2 matching files and 1 non-matching file + use _ <- result.try(write(a, "a") |> result.map_error(error_to_string)) + use _ <- result.try(write(b, "b") |> result.map_error(error_to_string)) + use _ <- result.try( + write(keep, "keep") |> result.map_error(error_to_string), + ) + + delete_files_matching(directory, ".snap") + |> should + |> be_equal(Ok(2)) + |> or_fail_with("expected two deleted snapshots") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/parallel_config.gleam b/examples/snippets/test/snippets/utils/parallel_config.gleam new file mode 100644 index 0000000..46c7d4a --- /dev/null +++ b/examples/snippets/test/snippets/utils/parallel_config.gleam @@ -0,0 +1,21 @@ +import dream_test/matchers.{have_length, or_fail_with, should, succeed} +import 
dream_test/parallel.{ParallelConfig} +import dream_test/unit.{describe, it} + +pub fn tests() { + let config = ParallelConfig(max_concurrency: 2, default_timeout_ms: 1000) + + describe("ParallelConfig", [ + it("can be constructed to customize execution", fn() { + let suite = + describe("Suite", [ + it("a", fn() { Ok(succeed()) }), + ]) + + parallel.run_root_parallel(config, suite) + |> should + |> have_length(1) + |> or_fail_with("expected one result") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/parallel_direct.gleam b/examples/snippets/test/snippets/utils/parallel_direct.gleam new file mode 100644 index 0000000..4088372 --- /dev/null +++ b/examples/snippets/test/snippets/utils/parallel_direct.gleam @@ -0,0 +1,20 @@ +import dream_test/matchers.{have_length, or_fail_with, should, succeed} +import dream_test/parallel +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Parallel executor", [ + it("can run a suite and return a list of results", fn() { + let suite = + describe("Suite", [ + it("a", fn() { Ok(succeed()) }), + it("b", fn() { Ok(succeed()) }), + ]) + + parallel.run_root_parallel(parallel.default_config(), suite) + |> should + |> have_length(2) + |> or_fail_with("expected two results") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/parallel_with_reporter.gleam b/examples/snippets/test/snippets/utils/parallel_with_reporter.gleam new file mode 100644 index 0000000..6970f1c --- /dev/null +++ b/examples/snippets/test/snippets/utils/parallel_with_reporter.gleam @@ -0,0 +1,60 @@ +import dream_test/matchers.{succeed} +import dream_test/parallel +import dream_test/reporters/progress +import dream_test/reporters/types as reporter_types +import dream_test/types.{type TestSuite} +import dream_test/unit.{describe, it} +import gleam/option.{type Option, None, Some} + +pub fn suite() -> TestSuite(Nil) { + describe("suite", [ + it("passes", fn() { Ok(succeed()) }), + ]) +} + +fn write_if_some(text: Option(String), 
write: fn(String) -> Nil) { + case text { + Some(s) -> write(s) + None -> Nil + } +} + +pub fn main() { + let total = 1 + let completed = 0 + + let reporter = progress.new() + let write = fn(_s: String) { Nil } + + progress.handle_event(reporter, reporter_types.RunStarted(total: total)) + |> write_if_some(write) + + let parallel_result = + parallel.run_root_parallel_with_reporter( + parallel.RunRootParallelWithReporterConfig( + config: parallel.default_config(), + suite: suite(), + progress_reporter: Some(reporter), + write: write, + total: total, + completed: completed, + ), + ) + let parallel.RunRootParallelWithReporterResult( + results: results, + completed: completed_after_suite, + progress_reporter: _progress_reporter, + ) = parallel_result + + progress.handle_event( + reporter, + reporter_types.RunFinished( + completed: completed_after_suite, + total: total, + results: results, + ), + ) + |> write_if_some(write) + + results +} diff --git a/examples/snippets/test/snippets/utils/process_helpers.gleam b/examples/snippets/test/snippets/utils/process_helpers.gleam new file mode 100644 index 0000000..3e9978a --- /dev/null +++ b/examples/snippets/test/snippets/utils/process_helpers.gleam @@ -0,0 +1,133 @@ +import dream_test/matchers.{be_between, be_equal, or_fail_with, should} +import dream_test/process +import dream_test/unit.{describe, it} +import gleam/erlang/process as erlang_process +import gleam/list +import gleam/otp/actor + +pub type TodoMessage { + Add(String) + GetAll(erlang_process.Subject(List(String))) +} + +fn handle_todo_message( + items: List(String), + message: TodoMessage, +) -> actor.Next(List(String), TodoMessage) { + case message { + Add(item) -> actor.continue(list.append(items, [item])) + GetAll(reply_to) -> { + erlang_process.send(reply_to, items) + actor.continue(items) + } + } +} + +fn always_true() -> Bool { + True +} + +fn always_ok_42() -> Result(Int, Nil) { + Ok(42) +} + +pub fn tests() { + describe("Process helpers", [ + 
it("start_counter + increment + get_count work", fn() { + let counter = process.start_counter() + process.increment(counter) + process.increment(counter) + + process.get_count(counter) + |> should + |> be_equal(2) + |> or_fail_with("expected counter to be 2") + }), + + it("CounterMessage can be sent directly", fn() { + let counter = process.start_counter() + + erlang_process.send(counter, process.Increment) + erlang_process.send(counter, process.SetCount(10)) + + process.get_count(counter) + |> should + |> be_equal(10) + |> or_fail_with("expected counter to be 10 after SetCount") + }), + + it("start_counter_with initializes the counter", fn() { + let counter = process.start_counter_with(10) + process.decrement(counter) + + process.get_count(counter) + |> should + |> be_equal(9) + |> or_fail_with("expected counter to be 9 after decrement") + }), + + it("set_count sets a counter to a specific value", fn() { + let counter = process.start_counter() + process.set_count(counter, 42) + + process.get_count(counter) + |> should + |> be_equal(42) + |> or_fail_with("expected counter to be 42 after set_count") + }), + + it("unique_port returns a value in the safe range", fn() { + process.unique_port() + |> should + |> be_between(10_000, 60_000) + |> or_fail_with("expected unique_port to be within 10k..60k") + }), + + it("start_actor + call_actor work", fn() { + let todos = process.start_actor([], handle_todo_message) + + erlang_process.send(todos, Add("Write tests")) + erlang_process.send(todos, Add("Run tests")) + + process.call_actor(todos, GetAll, 1000) + |> should + |> be_equal(["Write tests", "Run tests"]) + |> or_fail_with("expected items to be preserved in insertion order") + }), + + it("await_ready returns Ready(True) when the check returns True", fn() { + process.await_ready(process.quick_poll_config(), always_true) + |> should + |> be_equal(process.Ready(True)) + |> or_fail_with("expected await_ready to return Ready(True)") + }), + + it("await_some returns 
Ready(value) when the check returns Ok", fn() { + process.await_some(process.default_poll_config(), always_ok_42) + |> should + |> be_equal(process.Ready(42)) + |> or_fail_with("expected await_some to return Ready(42)") + }), + + it("default_poll_config has expected values", fn() { + process.default_poll_config() + |> should + |> be_equal(process.PollConfig(timeout_ms: 5000, interval_ms: 50)) + |> or_fail_with("expected default_poll_config to be 5000ms/50ms") + }), + + it("quick_poll_config has expected values", fn() { + process.quick_poll_config() + |> should + |> be_equal(process.PollConfig(timeout_ms: 1000, interval_ms: 10)) + |> or_fail_with("expected quick_poll_config to be 1000ms/10ms") + }), + + it("PortSelection can be constructed", fn() { + process.Port(1234) + |> should + |> be_equal(process.Port(1234)) + |> or_fail_with("expected PortSelection to be constructible") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/sandboxing.gleam b/examples/snippets/test/snippets/utils/sandboxing.gleam new file mode 100644 index 0000000..1333384 --- /dev/null +++ b/examples/snippets/test/snippets/utils/sandboxing.gleam @@ -0,0 +1,51 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/sandbox.{ + SandboxCompleted, SandboxConfig, SandboxCrashed, SandboxTimedOut, +} +import dream_test/unit.{describe, it} + +fn loop_forever() { + loop_forever() +} + +pub fn tests() { + describe("Sandboxing", [ + it("run_isolated returns SandboxCompleted(value) on success", fn() { + let config = SandboxConfig(timeout_ms: 100, show_crash_reports: False) + let result = sandbox.run_isolated(config, fn() { 123 }) + + result + |> should + |> be_equal(SandboxCompleted(123)) + |> or_fail_with("expected SandboxCompleted(123)") + }), + + it( + "run_isolated returns SandboxTimedOut when the function is too slow", + fn() { + let config = SandboxConfig(timeout_ms: 10, show_crash_reports: False) + let result = sandbox.run_isolated(config, loop_forever) + + 
result + |> should + |> be_equal(SandboxTimedOut) + |> or_fail_with("expected SandboxTimedOut") + }, + ), + + it("run_isolated returns SandboxCrashed when the function panics", fn() { + let config = SandboxConfig(timeout_ms: 100, show_crash_reports: False) + let result = sandbox.run_isolated(config, fn() { panic as "boom" }) + + let did_crash = case result { + SandboxCrashed(_) -> True + _ -> False + } + + did_crash + |> should + |> be_equal(True) + |> or_fail_with("expected SandboxCrashed(...)") + }), + ]) +} diff --git a/examples/snippets/test/snippets/utils/timing_helpers.gleam b/examples/snippets/test/snippets/utils/timing_helpers.gleam new file mode 100644 index 0000000..486cfef --- /dev/null +++ b/examples/snippets/test/snippets/utils/timing_helpers.gleam @@ -0,0 +1,54 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/timing +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Timing", [ + it("format_duration_ms scales milliseconds and seconds", fn() { + // Arrange & Act + let ms = timing.format_duration_ms(42) + + // Assert + ms + |> should + |> be_equal("42ms") + |> or_fail_with("expected 42ms") + }), + + it("format_duration_ms formats 1500ms as seconds", fn() { + timing.format_duration_ms(1500) + |> should + |> be_equal("1.5s") + |> or_fail_with("expected 1.5s") + }), + + it("format_duration_us formats sub-millisecond values", fn() { + timing.format_duration_us(500) + |> should + |> be_equal("0.5ms") + |> or_fail_with("expected 0.5ms") + }), + + it("now_ms is monotonic (non-decreasing)", fn() { + let t1 = timing.now_ms() + let t2 = timing.now_ms() + let ok = t2 >= t1 + + ok + |> should + |> be_equal(True) + |> or_fail_with("expected now_ms to be monotonic") + }), + + it("now_us is monotonic (non-decreasing)", fn() { + let t1 = timing.now_us() + let t2 = timing.now_us() + let ok = t2 >= t1 + + ok + |> should + |> be_equal(True) + |> or_fail_with("expected now_us to be monotonic") + }), + ]) +} diff --git 
a/examples/snippets/test/snippets/utils/types_helpers.gleam b/examples/snippets/test/snippets/utils/types_helpers.gleam new file mode 100644 index 0000000..c4c3207 --- /dev/null +++ b/examples/snippets/test/snippets/utils/types_helpers.gleam @@ -0,0 +1,32 @@ +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/types +import dream_test/unit.{describe, it} +import gleam/option.{None} + +pub fn tests() { + describe("Types", [ + it("status_from_failures returns Passed for empty failures", fn() { + types.status_from_failures([]) + |> should + |> be_equal(types.Passed) + |> or_fail_with("expected Passed for empty failures") + }), + + it("to_assertion_result converts match results", fn() { + types.to_assertion_result(types.MatchOk(1)) + |> should + |> be_equal(types.AssertionOk) + |> or_fail_with("expected MatchOk -> AssertionOk") + }), + + it("status_from_failures returns Failed for non-empty failures", fn() { + let failure = + types.AssertionFailure(operator: "op", message: "msg", payload: None) + + types.status_from_failures([failure]) + |> should + |> be_equal(types.Failed) + |> or_fail_with("expected Failed for non-empty failures") + }), + ]) +} diff --git a/examples/snippets/test/snippets_test.gleam b/examples/snippets/test/snippets_test.gleam index 704829e..2e4e26c 100644 --- a/examples/snippets/test/snippets_test.gleam +++ b/examples/snippets/test/snippets_test.gleam @@ -1,70 +1,20 @@ -//// Main test runner for all README snippets -//// -//// Each snippet lives in its own file for easy linking from README.md +import dream_test/discover +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner -import chaining -import custom_matchers -import dream_test/reporter/bdd.{report} -import dream_test/runner.{exit_on_failure, run_all, run_suite} -import dream_test/unit.{describe, to_test_cases} -import execution_modes -import explicit_failures -import gherkin_discover -import gherkin_feature -import 
gherkin_file -import gherkin_hero -import gherkin_placeholders -import gherkin_step_handler -import gleam/io -import gleam/list -import hero -import hook_failure -import hook_inheritance -import json_reporter -import lifecycle_hooks -import quick_start -import runner_config -import sequential_execution -import skipping_tests +fn suites() { + let discover.LoadResult(suites: suites, errors: _errors) = + discover.tests("snippets/**.gleam") + |> discover.load() -pub fn tests() { - describe("README Snippets", [ - quick_start.tests(), - hero.tests(), - chaining.tests(), - custom_matchers.tests(), - lifecycle_hooks.tests(), - explicit_failures.tests(), - hook_inheritance.tests(), - hook_failure.tests(), - runner_config.tests(), - json_reporter.tests(), - sequential_execution.tests(), - execution_modes.tests(), - skipping_tests.tests(), - ]) + suites } pub fn main() { - // Run unit test snippets - let unit_results = - to_test_cases("snippets_test", tests()) - |> run_all() - - // Run Gherkin snippets (these return TestSuite, not UnitTest) - let gherkin_results = - [ - gherkin_hero.tests(), - gherkin_feature.tests(), - gherkin_step_handler.tests(), - gherkin_placeholders.tests(), - gherkin_file.tests(), - gherkin_discover.tests(), - ] - |> list.flat_map(run_suite) - - let all_results = list.append(unit_results, gherkin_results) - - report(all_results, io.print) - exit_on_failure(all_results) + runner.new(suites()) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new() |> bdd.color()]) + |> runner.exit_on_failure() + |> runner.run() } diff --git a/gleam.toml b/gleam.toml index f7e5a78..e92a2a9 100644 --- a/gleam.toml +++ b/gleam.toml @@ -1,16 +1,15 @@ name = "dream_test" -version = "1.2.0" +version = "2.0.0" description = "A testing framework for Gleam that gets out of your way" licences = ["MIT"] repository = { type = "github", user = "TrustBound", repo = "dream_test" } links = [{ title = "Documentation", href = "https://hexdocs.pm/dream_test" 
}] [dependencies] -gleam_stdlib = ">= 0.44.0 and < 2.0.0" -gleam_otp = ">= 1.2.0 and < 2.0.0" -gleam_erlang = ">= 0.25.0 and < 2.0.0" +gleam_stdlib = ">= 0.60.0 and < 1.0.0" +gleam_otp = ">= 1.1.0 and < 2.0.0" +gleam_erlang = ">= 1.0.0 and < 2.0.0" gleam_regexp = ">= 1.0.0 and < 2.0.0" -gleam_json = ">= 2.0.0 and < 4.0.0" +gleam_json = ">= 3.0.1 and < 4.0.0" [dev-dependencies] -gleeunit = ">= 1.0.0 and < 2.0.0" diff --git a/manifest.toml b/manifest.toml index 606e0b7..ba2e1f6 100644 --- a/manifest.toml +++ b/manifest.toml @@ -7,13 +7,11 @@ packages = [ { name = "gleam_otp", version = "1.2.0", build_tools = ["gleam"], requirements = ["gleam_erlang", "gleam_stdlib"], otp_app = "gleam_otp", source = "hex", outer_checksum = "BA6A294E295E428EC1562DC1C11EA7530DCB981E8359134BEABC8493B7B2258E" }, { name = "gleam_regexp", version = "1.1.1", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleam_regexp", source = "hex", outer_checksum = "9C215C6CA84A5B35BB934A9B61A9A306EC743153BE2B0425A0D032E477B062A9" }, { name = "gleam_stdlib", version = "0.67.0", build_tools = ["gleam"], requirements = [], otp_app = "gleam_stdlib", source = "hex", outer_checksum = "6368313DB35963DC02F677A513BB0D95D58A34ED0A9436C8116820BF94BE3511" }, - { name = "gleeunit", version = "1.9.0", build_tools = ["gleam"], requirements = ["gleam_stdlib"], otp_app = "gleeunit", source = "hex", outer_checksum = "DA9553CE58B67924B3C631F96FE3370C49EB6D6DC6B384EC4862CC4AAA718F3C" }, ] [requirements] -gleam_erlang = { version = ">= 0.25.0 and < 2.0.0" } -gleam_json = { version = ">= 2.0.0 and < 4.0.0" } -gleam_otp = { version = ">= 1.2.0 and < 2.0.0" } +gleam_erlang = { version = ">= 1.0.0 and < 2.0.0" } +gleam_json = { version = ">= 3.0.1 and < 4.0.0" } +gleam_otp = { version = ">= 1.1.0 and < 2.0.0" } gleam_regexp = { version = ">= 1.0.0 and < 2.0.0" } -gleam_stdlib = { version = ">= 0.44.0 and < 2.0.0" } -gleeunit = { version = ">= 1.0.0 and < 2.0.0" } +gleam_stdlib = { version = ">= 0.60.0 and < 
2.0.0" } diff --git a/releases/release-1.0.0.md b/releases/release-1.0.0.md index e37f13a..278e815 100644 --- a/releases/release-1.0.0.md +++ b/releases/release-1.0.0.md @@ -14,7 +14,7 @@ Dream Test 1.0.0 is the initial stable release of the testing framework for Glea ### Assertions Module -- Fluent assertion API with `should()` builder +- Fluent assertion API with `should` builder - `or_fail_with` for custom failure messages - Chainable assertion pattern @@ -77,3 +77,5 @@ dream_test = "~> 1.0" + + diff --git a/releases/release-1.0.1.md b/releases/release-1.0.1.md index a4b14fc..3022b85 100644 --- a/releases/release-1.0.1.md +++ b/releases/release-1.0.1.md @@ -68,3 +68,5 @@ This release contains no breaking changes or code modifications. All functionali + + diff --git a/releases/release-1.0.3.md b/releases/release-1.0.3.md index 869d4bc..9d37820 100644 --- a/releases/release-1.0.3.md +++ b/releases/release-1.0.3.md @@ -75,3 +75,5 @@ This release is fully backward compatible. All existing code continues to work w + + diff --git a/releases/release-1.1.0.md b/releases/release-1.1.0.md index 98feabf..dd6abd6 100644 --- a/releases/release-1.1.0.md +++ b/releases/release-1.1.0.md @@ -14,11 +14,11 @@ Write behavior-driven tests using familiar Given/When/Then syntax: ```gleam import dream_test/gherkin/feature.{feature, scenario, given, when, then} -import dream_test/gherkin/steps.{type StepContext, get_int, new_registry, step} +import dream_test/gherkin/steps.{type StepContext, get_int, step} pub fn tests() { let steps = - new_registry() + steps.new() |> step("I have {int} items in my cart", step_have_items) |> step("I add {int} more items", step_add_items) |> step("I should have {int} items total", step_should_have) @@ -196,7 +196,7 @@ pub fn be_even(result: MatchResult(Int)) -> MatchResult(Int) { } // Use it like any built-in matcher -4 |> should() |> be_even() |> or_fail_with("Should be even") +4 |> should |> be_even() |> or_fail_with("Should be even") ``` ### 📊 JSON 
Reporter @@ -204,8 +204,8 @@ pub fn be_even(result: MatchResult(Int)) -> MatchResult(Int) { Output test results as JSON for CI/CD integration, test aggregation, or tooling: ```gleam -import dream_test/reporter/json -import dream_test/reporter/bdd.{report} +import dream_test/reporters/json +import dream_test/reporters/bdd.{report} pub fn main() { to_test_cases("my_test", tests()) diff --git a/releases/release-1.2.0.md b/releases/release-1.2.0.md index df45ca1..b1580d0 100644 --- a/releases/release-1.2.0.md +++ b/releases/release-1.2.0.md @@ -17,7 +17,7 @@ import dream_test/assertions/should.{should, match_snapshot, or_fail_with} it("renders user profile", fn() { render_profile(user) - |> should() + |> should |> match_snapshot("./test/snapshots/user_profile.snap") |> or_fail_with("Profile should match snapshot") }) @@ -38,7 +38,7 @@ Test any value using `string.inspect` serialization—perfect for records, lists ```gleam build_config() -|> should() +|> should |> match_snapshot_inspect("./test/snapshots/config.snap") |> or_fail_with("Config should match snapshot") ``` @@ -49,7 +49,7 @@ No magic environment variables or flags. To update a snapshot: ```sh rm ./test/snapshots/user_profile.snap -gleam test +make test ``` The new snapshot is created automatically on the next run. @@ -127,3 +127,4 @@ See the new [examples/snippets/test/snapshot_testing.gleam](https://github.com/T + diff --git a/releases/release-2.0.0.md b/releases/release-2.0.0.md new file mode 100644 index 0000000..cf23edc --- /dev/null +++ b/releases/release-2.0.0.md @@ -0,0 +1,401 @@ +# Dream Test 2.0.0 Release Notes + +**Release Date:** 2025-12-27 + +Dream Test 2.0 is a **major** release that makes test execution and reporting more explicit: + +- A **suite-first runner** with a pipe-friendly builder (`runner.new([suite]) |> ... |> runner.run()`). +- **Split reporting**: a live **progress reporter** during the run, and one or more + deterministic **results reporters** printed at the end. 
+- **Result-returning tests + hooks**: test bodies and lifecycle hooks return `Result(..., String)`, enabling clean multi-step setup with `use <- result.try(...)`. +- A unified public assertions surface under `dream_test/matchers`. +- Clearer, safer behavior for hooks, timeouts, and crashes (with optional crash reports). +- **Suite-specific execution config**: run some suites sequentially / with different timeouts in the same runner via `runner.add_suites_with_config(...)`. + +## Why Dream Test 2.0? + +Dream Test 2.0 is mostly about **reducing surprise** in real-world suites (parallel execution, multi-step setup, CI logs) by making the framework's "execution model" explicit and composable. + +What we were fixing: + +- **Hidden control flow** in tests with multi-step setup. In 1.x, tests often needed extra boilerplate to bail early with a useful message. +- **Output that gets confusing under parallelism**. When many tests finish out of order, "nice output" requires an explicit event model so reporters can stay deterministic. +- **Friction around wiring suites**. Manually maintaining import lists is tedious and error-prone as a codebase grows. + +What we wanted in 2.0: + +- **Linear, readable multi-step tests**: test bodies and hooks return `Result(_, String)` so you can use `use <- result.try(...)` and early-exit with a human error message. +- **A single "runner pipeline"**: your `main()` becomes the policy surface (parallelism, timeouts, filtering, reporting, CI exit codes). +- **Reporters driven by structured events**: output becomes reliable even when execution is concurrent. +- **Optional discovery** for teams that prefer not to maintain explicit suite lists. + +## Highlights + +### ✅ Runner: suite-first builder (`dream_test/runner`) + +**Why this change:** in 1.x, "how tests run" was spread across helpers and defaults. 
In 2.0, `main()` is the explicit policy surface: you can read one pipeline and know concurrency, timeouts, filtering, reporting, and CI behavior. + +The runner is now a single, explicit pipeline: + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Example", [ + it("works", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("math should work") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.max_concurrency(8) + |> runner.default_timeout_ms(10_000) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +Runner builder functions: + +| Function | Purpose | Default | +| --------------------------------- | --------------------------------------- | ----------- | +| `runner.new([suite])` | Creates a `RunBuilder(ctx)` | — | +| `runner.max_concurrency(n)` | Controls parallelism (`1` = sequential) | `50` | +| `runner.default_timeout_ms(ms)` | Default timeout for tests | `5000` (5s) | +| `runner.progress_reporter(...)` | Live progress during the run | None | +| `runner.results_reporters([...])` | End-of-run reporters | `[]` | +| `runner.filter_tests(predicate)` | Pre-execution filtering | None | +| `runner.exit_on_failure()` | Exit with code 1 on failure | Disabled | +| `runner.run()` | Execute and return `List(TestResult)` | — | + +### ✅ Result-returning tests + hooks + +**Why this change:** multi-step tests are common (fixtures → setup → assertions). Returning `Result(_, String)` lets tests stay linear, and lets failures carry a human explanation without forcing extra plumbing. + +In v2, **tests return `Result(AssertionResult, String)`** and **hooks return `Result(ctx, String)`**. 
You can bail out early with `Error("...")`, and the runner will record it as a failure with that message. + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/unit.{describe, it} +import gleam/result + +fn load_config() -> Result(String, String) { + // Setup helpers should return Result(_, String) so result.try stays clean. + Ok("config loaded") +} + +fn connect_db(config: String) -> Result(String, String) { + case config { + "" -> Error("config was empty") + _ -> Ok("db connected") + } +} + +pub fn tests() { + describe("Multi-step setup", [ + it("can short-circuit with a message", fn() { + use config <- result.try(load_config()) + use db_status <- result.try(connect_db(config)) + + db_status + |> should + |> be_equal("db connected") + |> or_fail_with("database should connect") + }), + ]) +} +``` + +### 📣 Reporting: progress + results reporters + +**Why this change:** parallel execution means completion order is not declaration order. Dream Test splits reporting so live progress can react to completion order, while final reports are printed in traversal order (deterministic). + +This is a **full refactor** of reporting in 2.0: + +- During the run, the runner emits structured events (`dream_test/reporters/types.ReporterEvent`) so live UIs can update. +- At the end of the run, the runner hands the **full traversal-ordered results** to one or more **results reporters**. +- The runner owns output routing via `runner.output(...)` / `runner.silent()`. 
+ +The runner emits these events: + +| Event | When | +| ---------------------------------------- | -------------------------------------- | +| `RunStarted(total)` | Run begins | +| `TestFinished(completed, total, result)` | Each test completes (completion order) | +| `HookStarted(...)` / `HookFinished(...)` | Lifecycle hooks run | +| `RunFinished(completed, total, results)` | Run ends (results in traversal order) | + +Built-in reporters: + +| Reporter | Purpose | Configuration | +| ---------------- | -------------------------------- | ---------------------------------------------------- | +| `progress.new()` | Live progress bar during run | — | +| `bdd.new()` | Human-readable BDD report at end | `bdd.color`, `bdd.failures_only`, `bdd.summary_only` | +| `json.new()` | Machine-readable JSON at end | `json.pretty` | + +Notes: + +- The **BDD reporter now also formats Gherkin scenarios** (Feature → Scenario layout) when results include `GherkinScenario(_)`. +- Use **progress + bdd** for local dev; use **json** for CI/tooling. + +### 🧯 Sandbox: optional crash reports (`dream_test/sandbox`) + +**Why this change:** crash reports are useful when debugging locally, but noisy in CI. 2.0 keeps crash isolation while letting you opt into crash logs when you need them. + +- `SandboxConfig(show_crash_reports: False)` suppresses `=CRASH REPORT====` output (default). +- `sandbox.with_crash_reports(config)` enables crash reports for local debugging. + +--- + +## Breaking changes & migration guide + +If you're upgrading from Dream Test 1.x → 2.0, follow these steps in order. Most projects can upgrade in **10–20 minutes**. + +### Removed and renamed APIs + +| 1.x API | 2.0 replacement | +| -------------------------------------------------------------- | --------------------------------------------------------------- | +| `dream_test/assertions/should` | `dream_test/matchers` | +| `dream_test/reporter` | `dream_test/reporters/bdd`, `dream_test/reporters/json`, etc. 
| +| `run_all(test_cases)` | `runner.new([suite]) \|> runner.run()` | +| `run_suite(suite)` | `runner.new([suite]) \|> runner.run()` | +| `to_test_cases(suite)` | No longer needed; pass suites directly to `runner.new(...)` | +| `report(results, io.print)` | `runner.results_reporters([bdd.new()])` | +| `exit_on_failure(results)` | `runner.exit_on_failure()` (call before `run()`) | +| `RunnerConfig { ... }` | Builder functions: `runner.max_concurrency(...)`, etc. | +| (new) run suites with different concurrency/timeout in one run | `runner.add_suites(...)` + `runner.add_suites_with_config(...)` | +| Test body returns `AssertionResult` | Test body returns `Result(AssertionResult, String)` | +| Hook body returns `ctx` | Hook body returns `Result(ctx, String)` | +| `world.get` returns `Result(a, Nil)` | `world.get` returns `Result(a, String)` | + +### Step 1: bump the dependency + +```toml +[dev-dependencies] +dream_test = "~> 2.0" +``` + +```bash +gleam deps download +``` + +### Step 2: update imports and runner entrypoint + +Replace your 1.x runner with the v2 builder pipeline: + +```gleam +import dream_test/matchers.{be_equal, or_fail_with, should} +import dream_test/reporters/bdd +import dream_test/reporters/progress +import dream_test/runner +import dream_test/unit.{describe, it} + +pub fn tests() { + describe("Example", [ + it("works", fn() { + 1 + 1 + |> should + |> be_equal(2) + |> or_fail_with("math should work") + }), + ]) +} + +pub fn main() { + runner.new([tests()]) + |> runner.progress_reporter(progress.new()) + |> runner.results_reporters([bdd.new()]) + |> runner.exit_on_failure() + |> runner.run() +} +``` + +For colored BDD + pretty JSON output: + +```gleam +runner.new([tests()]) +|> runner.progress_reporter(progress.new()) +|> runner.results_reporters([ + bdd.new() |> bdd.color, + json.new() |> json.pretty, +]) +|> runner.exit_on_failure() +|> runner.run() +``` + +### Step 3: update test bodies to return `Result(_, String)` + +- **Most matcher chains 
already work**: `... |> or_fail_with("...")` returns `Result(AssertionResult, String)`.
+- If you used `succeed()` / `fail_with(...)` in a branch, wrap it in `Ok(...)`.
+- If setup should abort immediately, return `Error("message")`.
+
+```gleam
+import dream_test/matchers.{be_equal, fail_with, or_fail_with, should, succeed}
+import dream_test/unit.{describe, it}
+import gleam/result
+
+pub fn tests() {
+  describe("Result-returning tests", [
+    // 1) Matcher chain (no change needed)
+    it("matcher chain", fn() {
+      "hello"
+      |> should
+      |> be_equal("hello")
+      |> or_fail_with("strings should match")
+    }),
+
+    // 2) Branchy logic: wrap in Ok(...)
+    it("branchy logic", fn() {
+      Ok(case True {
+        True -> succeed()
+        False -> fail_with("expected True")
+      })
+    }),
+
+    // 3) Multi-step setup: use result.try
+    it("multi-step", fn() {
+      use value <- result.try(Ok("setup done"))
+      value
+      |> should
+      |> be_equal("setup done")
+      |> or_fail_with("setup should complete")
+    }),
+  ])
+}
+```
+
+### Step 4: update hooks to return `Result(ctx, String)`
+
+Hooks now return `Result(ctx, String)` so they can fail with a message:
+
+```gleam
+import dream_test/matchers.{succeed}
+import dream_test/unit.{before_each, describe, it}
+
+pub fn tests() {
+  describe("With hooks", [
+    before_each(fn() {
+      // Return Ok(Nil) on success, Error("message") on failure
+      Ok(Nil)
+    }),
+    it("runs after setup", fn() {
+      Ok(succeed())
+    }),
+  ])
+}
+```
+
+For context-aware tests (`dream_test/unit_context`):
+
+```gleam
+import dream_test/matchers.{be_equal, or_fail_with, should}
+import dream_test/unit_context.{before_each, describe, it}
+
+pub fn tests() {
+  describe(
+    "With context",
+    fn() { Ok("initial context") }, // seed returns Result
+    [
+      before_each(fn(ctx) {
+        // Transform context, return Result(new_ctx, String)
+        Ok(ctx <> " + setup")
+      }),
+      it("has context", fn(ctx) {
+        ctx
+        |> should
+        |> be_equal("initial context + setup")
+        |> or_fail_with("context should be threaded")
+      }),
+    ],
+  )
+}
+```
+
+### Step 5 (optional): migrate filtering to 
`runner.filter_tests`
+
+Filtering now happens **before execution**, so skipped subtrees don't run hooks.
+
+```gleam
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner.{type TestInfo}
+import gleam/list
+
+pub fn only_smoke(info: TestInfo) -> Bool {
+  list.contains(info.tags, "smoke")
+}
+
+pub fn main() {
+  runner.new([tests()])
+  |> runner.filter_tests(only_smoke)
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.run()
+}
+```
+
+### Step 6 (optional): use discovery instead of explicit imports
+
+If you have many test modules, discovery removes the manual import burden:
+
+```gleam
+import dream_test/discover
+import dream_test/reporters/bdd
+import dream_test/reporters/progress
+import dream_test/runner
+
+pub fn main() {
+  let suites =
+    discover.new()
+    |> discover.from_path("unit/**_test.gleam")
+    |> discover.to_suites()
+
+  runner.new(suites)
+  |> runner.progress_reporter(progress.new())
+  |> runner.results_reporters([bdd.new()])
+  |> runner.exit_on_failure()
+  |> runner.run()
+}
+```
+
+Notes:
+
+- `from_path("unit/**_test.gleam")` is a module path glob under `./test/`.
+- Discovery loads modules that export `tests/0` returning `TestSuite(Nil)`.
+- Requires compiled BEAM modules (run `gleam build` first).
+
+### Step 7 (if you use Gherkin): update `world.get` calls
+
+`world.get` now returns `Result(a, String)` instead of `Result(a, Nil)`:
+
+```gleam
+// 1.x
+case world.get(context.world, "count") {
+  Ok(count) -> ...
+  Error(Nil) -> Error("count not found") // had to add message manually
+}
+
+// 2.0
+case world.get(context.world, "count") {
+  Ok(count) -> ... 
+ Error(message) -> Error(message) // message is already a String +} +``` + +--- + +## Documentation + +- [Quick Start](../documentation/02-quick-start.md) +- [Runner & Execution](../documentation/07-runner-and-execution.md) +- [Reporters](../documentation/08-reporters.md) +- [Utilities (sandbox)](../documentation/11-utilities.md) +- [CHANGELOG](../CHANGELOG.md) diff --git a/src/dream_test/assertions/should.gleam b/src/dream_test/assertions/should.gleam deleted file mode 100644 index 7794ce5..0000000 --- a/src/dream_test/assertions/should.gleam +++ /dev/null @@ -1,633 +0,0 @@ -//// Assertion API for dream_test. -//// -//// This module provides a fluent, pipe-friendly assertion API. Every assertion -//// chain starts with `should()` and ends with `or_fail_with()`. -//// -//// ## Basic Pattern -//// -//// ```gleam -//// value -//// |> should() -//// |> equal(expected) -//// |> or_fail_with("Helpful error message") -//// ``` -//// -//// ## Available Matchers -//// -//// | Category | Matchers | -//// |----------------|-------------------------------------------------------------| -//// | **Equality** | `equal`, `not_equal` | -//// | **Boolean** | `be_true`, `be_false` | -//// | **Option** | `be_some`, `be_none` | -//// | **Result** | `be_ok`, `be_error` | -//// | **Collections**| `contain`, `not_contain`, `have_length`, `be_empty` | -//// | **Comparison** | `be_greater_than`, `be_less_than`, `be_at_least`, `be_at_most`, `be_between`, `be_in_range` | -//// | **String** | `start_with`, `end_with`, `contain_string` | -//// | **Snapshot** | `match_snapshot`, `match_snapshot_inspect` | -//// -//// ## Chaining Matchers -//// -//// Matchers can be chained. 
Each matcher passes its unwrapped value to the next: -//// -//// ```gleam -//// // Unwrap Some, then check the inner value -//// Some(42) -//// |> should() -//// |> be_some() -//// |> equal(42) -//// |> or_fail_with("Should be Some(42)") -//// -//// // Unwrap Ok, then check the inner value -//// Ok("hello") -//// |> should() -//// |> be_ok() -//// |> equal("hello") -//// |> or_fail_with("Should be Ok with 'hello'") -//// -//// // Unwrap Ok, then check the inner Option -//// Ok(Some(42)) -//// |> should() -//// |> be_ok() -//// |> be_some() -//// |> be_greater_than(40) -//// |> or_fail_with("Should be Ok(Some(n)) where n > 40") -//// ``` -//// -//// ## Explicit Failures -//// -//// Sometimes you need to fail a test explicitly in a conditional branch: -//// -//// ```gleam -//// case result { -//// Ok(user) -> { -//// user.name -//// |> should() -//// |> equal("Alice") -//// |> or_fail_with("User should be Alice") -//// } -//// Error(_) -> fail_with("Should have returned a user") -//// } -//// ``` -//// -//// ## Import Style -//// -//// For best readability, import the commonly used functions unqualified: -//// -//// ```gleam -//// import dream_test/assertions/should.{ -//// should, equal, be_ok, be_some, or_fail_with, fail_with, succeed, -//// } -//// ``` - -import dream_test/matchers/boolean -import dream_test/matchers/collection -import dream_test/matchers/comparison -import dream_test/matchers/equality -import dream_test/matchers/option -import dream_test/matchers/result -import dream_test/matchers/snapshot -import dream_test/matchers/string -import dream_test/types.{ - type AssertionResult, type MatchResult, AssertionFailed, AssertionFailure, - AssertionOk, MatchFailed, MatchOk, -} -import gleam/option as gleam_option - -/// Start an assertion chain. -/// -/// This wraps any value in a `MatchResult` so it can be piped into matchers. -/// Every assertion chain should start with this function. 
-/// -/// ## Example -/// -/// ```gleam -/// 42 -/// |> should() -/// |> equal(42) -/// |> or_fail_with("Should be 42") -/// ``` -/// -pub fn should(value: a) -> MatchResult(a) { - MatchOk(value) -} - -// ============================================================================= -// Equality Matchers -// ============================================================================= - -/// Assert that a value equals the expected value. -/// -/// Uses Gleam's structural equality (`==`). -/// -/// ## Example -/// -/// ```gleam -/// add(2, 3) -/// |> should() -/// |> equal(5) -/// |> or_fail_with("2 + 3 should equal 5") -/// ``` -/// -pub const equal = equality.equal - -/// Assert that a value does not equal the unexpected value. -/// -/// ## Example -/// -/// ```gleam -/// divide(10, 3) -/// |> should() -/// |> not_equal(3) -/// |> or_fail_with("10/3 should not equal 3 exactly") -/// ``` -/// -pub const not_equal = equality.not_equal - -// ============================================================================= -// Boolean Matchers -// ============================================================================= - -/// Assert that a value is `True`. -/// -/// ## Example -/// -/// ```gleam -/// is_valid(input) -/// |> should() -/// |> be_true() -/// |> or_fail_with("Input should be valid") -/// ``` -/// -pub const be_true = boolean.be_true - -/// Assert that a value is `False`. -/// -/// ## Example -/// -/// ```gleam -/// is_empty(list) -/// |> should() -/// |> be_false() -/// |> or_fail_with("List should not be empty") -/// ``` -/// -pub const be_false = boolean.be_false - -// ============================================================================= -// Option Matchers -// ============================================================================= - -/// Assert that an `Option` is `Some` and extract its value. -/// -/// If the assertion passes, the inner value is passed to subsequent matchers. -/// This enables chaining like `be_some() |> equal(42)`. 
-/// -/// ## Example -/// -/// ```gleam -/// find_user(id) -/// |> should() -/// |> be_some() -/// |> or_fail_with("User should exist") -/// -/// // With chaining: -/// find_user(id) -/// |> should() -/// |> be_some() -/// |> equal(expected_user) -/// |> or_fail_with("Should find the expected user") -/// ``` -/// -pub const be_some = option.be_some - -/// Assert that an `Option` is `None`. -/// -/// ## Example -/// -/// ```gleam -/// find_deleted_user(id) -/// |> should() -/// |> be_none() -/// |> or_fail_with("Deleted user should not exist") -/// ``` -/// -pub const be_none = option.be_none - -// ============================================================================= -// Result Matchers -// ============================================================================= - -/// Assert that a `Result` is `Ok` and extract its value. -/// -/// If the assertion passes, the `Ok` value is passed to subsequent matchers. -/// This enables chaining like `be_ok() |> equal(42)`. -/// -/// ## Example -/// -/// ```gleam -/// parse_int("42") -/// |> should() -/// |> be_ok() -/// |> or_fail_with("Should parse successfully") -/// -/// // With chaining: -/// parse_int("42") -/// |> should() -/// |> be_ok() -/// |> equal(42) -/// |> or_fail_with("Should parse to 42") -/// ``` -/// -pub const be_ok = result.be_ok - -/// Assert that a `Result` is `Error` and extract the error value. -/// -/// If the assertion passes, the error value is passed to subsequent matchers. 
-/// -/// ## Example -/// -/// ```gleam -/// parse_int("not a number") -/// |> should() -/// |> be_error() -/// |> or_fail_with("Should fail to parse") -/// -/// // With chaining: -/// validate(input) -/// |> should() -/// |> be_error() -/// |> equal(ValidationError("email required")) -/// |> or_fail_with("Should fail with email error") -/// ``` -/// -pub const be_error = result.be_error - -// ============================================================================= -// Collection Matchers -// ============================================================================= - -/// Assert that a list contains a specific item. -/// -/// ## Example -/// -/// ```gleam -/// [1, 2, 3] -/// |> should() -/// |> contain(2) -/// |> or_fail_with("List should contain 2") -/// ``` -/// -pub const contain = collection.contain - -/// Assert that a list does not contain a specific item. -/// -/// ## Example -/// -/// ```gleam -/// ["a", "b", "c"] -/// |> should() -/// |> not_contain("d") -/// |> or_fail_with("List should not contain 'd'") -/// ``` -/// -pub const not_contain = collection.not_contain - -/// Assert that a list has a specific length. -/// -/// ## Example -/// -/// ```gleam -/// get_users() -/// |> should() -/// |> have_length(3) -/// |> or_fail_with("Should have 3 users") -/// ``` -/// -pub const have_length = collection.have_length - -/// Assert that a list is empty. -/// -/// ## Example -/// -/// ```gleam -/// get_errors() -/// |> should() -/// |> be_empty() -/// |> or_fail_with("Should have no errors") -/// ``` -/// -pub const be_empty = collection.be_empty - -// ============================================================================= -// Comparison Matchers (Int) -// ============================================================================= - -/// Assert that an integer is greater than a threshold. 
-/// -/// ## Example -/// -/// ```gleam -/// count_items() -/// |> should() -/// |> be_greater_than(0) -/// |> or_fail_with("Should have at least one item") -/// ``` -/// -pub const be_greater_than = comparison.be_greater_than - -/// Assert that an integer is less than a threshold. -/// -/// ## Example -/// -/// ```gleam -/// response_time_ms -/// |> should() -/// |> be_less_than(100) -/// |> or_fail_with("Response should be under 100ms") -/// ``` -/// -pub const be_less_than = comparison.be_less_than - -/// Assert that an integer is at least a minimum value (>=). -/// -/// ## Example -/// -/// ```gleam -/// user.age -/// |> should() -/// |> be_at_least(18) -/// |> or_fail_with("User must be at least 18") -/// ``` -/// -pub const be_at_least = comparison.be_at_least - -/// Assert that an integer is at most a maximum value (<=). -/// -/// ## Example -/// -/// ```gleam -/// password.length -/// |> should() -/// |> be_at_most(128) -/// |> or_fail_with("Password must be at most 128 characters") -/// ``` -/// -pub const be_at_most = comparison.be_at_most - -/// Assert that an integer is between two values (exclusive). -/// -/// The value must be strictly greater than `min` and strictly less than `max`. -/// -/// ## Example -/// -/// ```gleam -/// port -/// |> should() -/// |> be_between(1024, 65535) -/// |> or_fail_with("Port must be between 1024 and 65535") -/// ``` -/// -pub const be_between = comparison.be_between - -/// Assert that an integer is within a range (inclusive). -/// -/// The value must be >= `min` and <= `max`. -/// -/// ## Example -/// -/// ```gleam -/// score -/// |> should() -/// |> be_in_range(0, 100) -/// |> or_fail_with("Score must be 0-100") -/// ``` -/// -pub const be_in_range = comparison.be_in_range - -/// Assert that a float is greater than a threshold. 
-/// -/// ## Example -/// -/// ```gleam -/// average -/// |> should() -/// |> be_greater_than_float(0.0) -/// |> or_fail_with("Average should be positive") -/// ``` -/// -pub const be_greater_than_float = comparison.be_greater_than_float - -/// Assert that a float is less than a threshold. -/// -/// ## Example -/// -/// ```gleam -/// error_rate -/// |> should() -/// |> be_less_than_float(0.01) -/// |> or_fail_with("Error rate should be under 1%") -/// ``` -/// -pub const be_less_than_float = comparison.be_less_than_float - -// ============================================================================= -// String Matchers -// ============================================================================= - -/// Assert that a string starts with a prefix. -/// -/// ## Example -/// -/// ```gleam -/// greeting -/// |> should() -/// |> start_with("Hello") -/// |> or_fail_with("Greeting should start with Hello") -/// ``` -/// -pub const start_with = string.start_with - -/// Assert that a string ends with a suffix. -/// -/// ## Example -/// -/// ```gleam -/// filename -/// |> should() -/// |> end_with(".gleam") -/// |> or_fail_with("File should be a Gleam file") -/// ``` -/// -pub const end_with = string.end_with - -/// Assert that a string contains a substring. -/// -/// ## Example -/// -/// ```gleam -/// log_message -/// |> should() -/// |> contain_string("error") -/// |> or_fail_with("Log should mention error") -/// ``` -/// -pub const contain_string = string.contain_string - -// ============================================================================= -// Snapshot Matchers -// ============================================================================= - -/// Assert that a string matches the content of a snapshot file. -/// -/// - If snapshot **doesn't exist**: creates it and passes -/// - If snapshot **exists and matches**: passes -/// - If snapshot **exists but doesn't match**: fails -/// -/// **To update a snapshot:** delete the file and re-run the test. 
-/// -/// ## Example -/// -/// ```gleam -/// render_html() -/// |> should() -/// |> match_snapshot("./test/snapshots/page.snap") -/// |> or_fail_with("HTML should match snapshot") -/// ``` -/// -pub const match_snapshot = snapshot.match_snapshot - -/// Assert that any value matches a snapshot (using string.inspect). -/// -/// Serializes the value using `string.inspect` and compares against -/// the stored snapshot. Useful for testing complex data structures. -/// -/// ## Example -/// -/// ```gleam -/// build_config() -/// |> should() -/// |> match_snapshot_inspect("./test/snapshots/config.snap") -/// |> or_fail_with("Config should match snapshot") -/// ``` -/// -pub const match_snapshot_inspect = snapshot.match_snapshot_inspect - -/// Delete a snapshot file. -/// -/// Use this to force regeneration of a snapshot on the next test run. -/// -/// ## Example -/// -/// ```gleam -/// let _ = clear_snapshot("./test/snapshots/old.snap") -/// ``` -/// -pub const clear_snapshot = snapshot.clear_snapshot - -/// Delete all snapshot files in a directory. -/// -/// Deletes all files with the `.snap` extension in the given directory. -/// -/// ## Example -/// -/// ```gleam -/// let _ = clear_snapshots_in_directory("./test/snapshots") -/// ``` -/// -pub const clear_snapshots_in_directory = snapshot.clear_snapshots_in_directory - -// ============================================================================= -// Terminal Operations -// ============================================================================= - -/// Complete an assertion chain and provide a failure message. -/// -/// This is the **terminal operation** that ends every assertion chain. It -/// converts the `MatchResult` into an `AssertionResult` that the test runner -/// understands. -/// -/// If the assertion passed, returns `AssertionOk`. If it failed, returns -/// `AssertionFailed` with the provided message. 
-/// -/// ## Example -/// -/// ```gleam -/// result -/// |> should() -/// |> equal(42) -/// |> or_fail_with("Result should be 42") -/// ``` -/// -/// ## Writing Good Messages -/// -/// Good failure messages explain **what should have happened**: -/// - ✓ "User should be authenticated after login" -/// - ✓ "Cart total should include tax" -/// - ✗ "wrong" -/// - ✗ "failed" -/// -pub fn or_fail_with(result: MatchResult(a), message: String) -> AssertionResult { - case result { - MatchOk(_) -> AssertionOk - - MatchFailed(failure) -> - AssertionFailed(AssertionFailure(..failure, message: message)) - } -} - -/// Explicitly fail a test with a message. -/// -/// Use this when you need to fail a test in a conditional branch where -/// the normal assertion chain doesn't apply. -/// -/// ## Example -/// -/// ```gleam -/// case result { -/// Ok(value) -> { -/// value -/// |> should() -/// |> equal(expected) -/// |> or_fail_with("Value should match") -/// } -/// Error(_) -> fail_with("Should have succeeded but got an error") -/// } -/// ``` -/// -/// ## When to Use -/// -/// - In `case` branches that represent unexpected states -/// - When testing that something does NOT happen -/// - As a placeholder for unimplemented test branches -/// -pub fn fail_with(message: String) -> AssertionResult { - AssertionFailed(AssertionFailure( - operator: "fail_with", - message: message, - payload: gleam_option.None, - )) -} - -/// Explicitly mark an assertion as successful. -/// -/// Use this when you need to explicitly succeed in a conditional branch, -/// as the counterpart to `fail_with`. 
-/// -/// ## Example -/// -/// ```gleam -/// case result { -/// Ok(_) -> succeed() -/// Error(_) -> fail_with("Should have succeeded") -/// } -/// ``` -/// -/// ## When to Use -/// -/// - In `case` branches where success is the expected outcome -/// - When all branches of a case must return an `AssertionResult` -/// - To make intent explicit rather than relying on implicit success -/// -pub fn succeed() -> AssertionResult { - AssertionOk -} diff --git a/src/dream_test/context.gleam b/src/dream_test/context.gleam index 0860b52..d508bd9 100644 --- a/src/dream_test/context.gleam +++ b/src/dream_test/context.gleam @@ -1,24 +1,227 @@ +//// Per-test context. +//// +//// This module provides a small record (`TestContext`) for storing +//// `types.AssertionFailure` values. Most test code won’t interact with it +//// directly—matchers already turn failures into `types.AssertionResult`. +//// +//// You may find it useful if you’re building custom integrations where you want +//// to accumulate multiple failures during a single test run. + import dream_test/types.{type AssertionFailure} /// Per-test context carrying assertion failures and any other /// per-test metadata we may need later. /// -/// This is the core state threaded through assertions. +/// Most users do not need this type. Dream Test’s public matcher pipeline +/// (`should |> ...`) carries failures via `types.MatchResult`, and the runner +/// reports failures via `types.TestResult`. +/// +/// `TestContext` exists as a small, explicit record for internal bookkeeping +/// and future extension (e.g. if the framework needs to accumulate multiple +/// failures during a single test run). 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/context +/// import dream_test/matchers.{be_equal, or_fail_with, should} +/// import dream_test/types.{AssertionFailure} +/// import dream_test/unit.{describe, it} +/// import gleam/option.{None} +/// +/// pub fn tests() { +/// describe("dream_test/context", [ +/// it("new has no failures", fn() { +/// context.new() +/// |> context.failures() +/// |> should +/// |> be_equal([]) +/// |> or_fail_with("expected new context to have no failures") +/// }), +/// +/// it("add_failure stores failures newest-first", fn() { +/// let first_failure = +/// AssertionFailure(operator: "op1", message: "m1", payload: None) +/// let second_failure = +/// AssertionFailure(operator: "op2", message: "m2", payload: None) +/// +/// context.new() +/// |> context.add_failure(first_failure) +/// |> context.add_failure(second_failure) +/// |> context.failures() +/// |> should +/// |> be_equal([second_failure, first_failure]) +/// |> or_fail_with("expected newest-first failure ordering") +/// }), +/// ]) +/// } +/// ``` +/// +/// ## Fields +/// +/// - `failures`: stored newest-first pub type TestContext { TestContext(failures: List(AssertionFailure)) } +/// Create a new, empty `TestContext`. +/// +/// ## Returns +/// +/// A `TestContext` with no recorded failures. +/// +/// ## Parameters +/// +/// None. 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/context +/// import dream_test/matchers.{be_equal, or_fail_with, should} +/// import dream_test/types.{AssertionFailure} +/// import dream_test/unit.{describe, it} +/// import gleam/option.{None} +/// +/// pub fn tests() { +/// describe("dream_test/context", [ +/// it("new has no failures", fn() { +/// context.new() +/// |> context.failures() +/// |> should +/// |> be_equal([]) +/// |> or_fail_with("expected new context to have no failures") +/// }), +/// +/// it("add_failure stores failures newest-first", fn() { +/// let first_failure = +/// AssertionFailure(operator: "op1", message: "m1", payload: None) +/// let second_failure = +/// AssertionFailure(operator: "op2", message: "m2", payload: None) +/// +/// context.new() +/// |> context.add_failure(first_failure) +/// |> context.add_failure(second_failure) +/// |> context.failures() +/// |> should +/// |> be_equal([second_failure, first_failure]) +/// |> or_fail_with("expected newest-first failure ordering") +/// }), +/// ]) +/// } +/// ``` pub fn new() -> TestContext { TestContext(failures: []) } -pub fn failures(context: TestContext) -> List(AssertionFailure) { +/// Get all failures recorded in a `TestContext`. +/// +/// Failures are stored newest-first. 
+/// +/// ## Parameters +/// +/// - `context`: the `TestContext` to inspect +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/context +/// import dream_test/matchers.{be_equal, or_fail_with, should} +/// import dream_test/types.{AssertionFailure} +/// import dream_test/unit.{describe, it} +/// import gleam/option.{None} +/// +/// pub fn tests() { +/// describe("dream_test/context", [ +/// it("new has no failures", fn() { +/// context.new() +/// |> context.failures() +/// |> should +/// |> be_equal([]) +/// |> or_fail_with("expected new context to have no failures") +/// }), +/// +/// it("add_failure stores failures newest-first", fn() { +/// let first_failure = +/// AssertionFailure(operator: "op1", message: "m1", payload: None) +/// let second_failure = +/// AssertionFailure(operator: "op2", message: "m2", payload: None) +/// +/// context.new() +/// |> context.add_failure(first_failure) +/// |> context.add_failure(second_failure) +/// |> context.failures() +/// |> should +/// |> be_equal([second_failure, first_failure]) +/// |> or_fail_with("expected newest-first failure ordering") +/// }), +/// ]) +/// } +/// ``` +/// +/// ## Returns +/// +/// A list of `AssertionFailure` values (newest-first). +pub fn failures(context context: TestContext) -> List(AssertionFailure) { context.failures } +/// Record an `AssertionFailure` in a `TestContext`. +/// +/// Dream Test represents assertion failures as structured values +/// (`types.AssertionFailure`). This helper lets internal code accumulate those +/// failures while a test runs. +/// +/// Failures are stored **newest-first**, so adding a failure is \(O(1)\). +/// +/// ## Parameters +/// +/// - `context`: the current `TestContext` +/// - `failure`: the failure to record +/// +/// ## Returns +/// +/// A new `TestContext` containing the added failure. 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/context +/// import dream_test/matchers.{be_equal, or_fail_with, should} +/// import dream_test/types.{AssertionFailure} +/// import dream_test/unit.{describe, it} +/// import gleam/option.{None} +/// +/// pub fn tests() { +/// describe("dream_test/context", [ +/// it("new has no failures", fn() { +/// context.new() +/// |> context.failures() +/// |> should +/// |> be_equal([]) +/// |> or_fail_with("expected new context to have no failures") +/// }), +/// +/// it("add_failure stores failures newest-first", fn() { +/// let first_failure = +/// AssertionFailure(operator: "op1", message: "m1", payload: None) +/// let second_failure = +/// AssertionFailure(operator: "op2", message: "m2", payload: None) +/// +/// context.new() +/// |> context.add_failure(first_failure) +/// |> context.add_failure(second_failure) +/// |> context.failures() +/// |> should +/// |> be_equal([second_failure, first_failure]) +/// |> or_fail_with("expected newest-first failure ordering") +/// }), +/// ]) +/// } +/// ``` pub fn add_failure( - context: TestContext, - failure: AssertionFailure, + context context: TestContext, + failure failure: AssertionFailure, ) -> TestContext { TestContext(failures: [failure, ..context.failures]) } diff --git a/src/dream_test/discover.gleam b/src/dream_test/discover.gleam new file mode 100644 index 0000000..ea33ca6 --- /dev/null +++ b/src/dream_test/discover.gleam @@ -0,0 +1,580 @@ +//// Test module discovery for Dream Test. +//// +//// This module provides an ergonomic way to discover test modules at runtime +//// (compiled `.beam` modules) and load their `tests/0` suites without having to +//// manually import every module. +//// +//// ## Mental model +//// +//// - You provide one or more **module path globs** (e.g. `"unit/**_test.gleam"`). +//// - Dream Test finds matching modules under `./test/` that export `tests/0`. +//// - It calls `tests/0` to get `TestSuite(Nil)` values. 
+//// +//// ## Example +//// +//// ```gleam +//// import dream_test/discover.{from_path, to_suites} +//// import dream_test/reporters/bdd +//// import dream_test/reporters/progress +//// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +//// +//// pub fn main() { +//// let suites = +//// discover.new() +//// |> from_path("snippets/unit/**.gleam") +//// |> to_suites() +//// +//// runner.new(suites) +//// |> progress_reporter(progress.new()) +//// |> results_reporters([bdd.new()]) +//// |> exit_on_failure() +//// |> run() +//// } +//// ``` +//// +//// Note: discovery requires compiled BEAM modules. + +import dream_test/types.{ + type AssertionResult, type Node, type TestSuite, AssertionFailed, + AssertionFailure, Group, Root, Test, Unit, +} +import gleam/list +import gleam/option.{None} +import gleam/string + +// ============================================================================ +// Types +// ============================================================================ + +/// Builder for discovering test modules and loading their suites. +/// +/// Discovery is built incrementally by adding one or more module-path glob +/// patterns (see `from_path` / `tests`). +/// +/// `TestDiscovery` is **opaque**: you can only build/consume it through the +/// functions in this module. +/// +/// ## Example +/// +/// ```gleam +/// discover.tests("snippets/unit/**.gleam") +/// |> discover.load() +/// ``` +pub opaque type TestDiscovery { + TestDiscovery(patterns: List(String)) +} + +/// Result of loading suites, containing both successes and errors. +/// +/// This is returned by `load` so callers can decide how to handle errors: +/// return them, log them, or convert them into failing suites via `to_suites`. 
+/// +/// ## Fields +/// +/// - `suites`: successfully loaded suites +/// - `errors`: discovery or load errors (human-readable strings) +/// +/// ## Example +/// +/// ```gleam +/// let discover.LoadResult(suites: suites, errors: _errors) = +/// discover.tests("snippets/**.gleam") +/// |> discover.load() +/// ``` +pub type LoadResult { + LoadResult(suites: List(TestSuite(Nil)), errors: List(String)) +} + +// ============================================================================ +// Builder API +// ============================================================================ + +/// Create an empty discovery builder. +/// +/// Most users will start with `tests(pattern)` instead. +/// +/// ## Returns +/// +/// A new empty `TestDiscovery`. +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover.{from_path, to_suites} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +/// import gleam/io +/// +/// pub fn main() { +/// let suites = +/// discover.new() +/// |> from_path("snippets/unit/**.gleam") +/// |> to_suites() +/// +/// runner.new(suites) +/// |> progress_reporter(progress.new()) +/// |> results_reporters([bdd.new()]) +/// |> exit_on_failure() +/// |> run() +/// } +/// ``` +pub fn new() -> TestDiscovery { + TestDiscovery(patterns: []) +} + +/// Add a glob pattern to the discovery set. +/// +/// You can call this multiple times to build up a list of globs. +/// +/// ## Parameters +/// +/// - `discovery`: The current discovery builder +/// - `pattern`: A slash-separated module path glob (the `.gleam` extension is optional) +/// +/// ## Returns +/// +/// A new `TestDiscovery` with the pattern appended. 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover.{from_path, to_suites} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +/// import gleam/io +/// +/// pub fn main() { +/// let suites = +/// discover.new() +/// |> from_path("snippets/unit/**.gleam") +/// |> to_suites() +/// +/// runner.new(suites) +/// |> progress_reporter(progress.new()) +/// |> results_reporters([bdd.new()]) +/// |> exit_on_failure() +/// |> run() +/// } +/// ``` +pub fn from_path( + discovery discovery: TestDiscovery, + pattern pattern: String, +) -> TestDiscovery { + TestDiscovery(patterns: list.append(discovery.patterns, [pattern])) +} + +/// Start discovering tests matching a module path glob pattern. +/// +/// The pattern is written using slash-separated module paths and may include +/// `*` / `**` globs. The `.gleam` extension is optional. +/// +/// Examples: +/// - `"unit/**_test.gleam"` +/// - `"unit/errors/**_test.gleam"` +/// - `"dream_test/**_test.gleam"` +/// +/// ## Parameters +/// +/// - `pattern`: A slash-separated module path glob (the `.gleam` extension is optional) +/// +/// ## Returns +/// +/// A new `TestDiscovery` initialized with the pattern. +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +/// import gleam/io +/// +/// pub fn main() { +/// let suites = +/// discover.tests("snippets/unit/**.gleam") +/// |> discover.to_suites() +/// +/// runner.new(suites) +/// |> progress_reporter(progress.new()) +/// |> results_reporters([bdd.new()]) +/// |> exit_on_failure() +/// |> run() +/// } +/// ``` +pub fn tests(pattern pattern: String) -> TestDiscovery { + new() |> from_path(pattern) +} + +/// List module names discovered for the configured pattern. 
+/// +/// This returns the discovered module names (as strings) or an aggregated error +/// message if discovery failed. +/// +/// ## Parameters +/// +/// - `discovery`: The configured discovery builder +/// +/// ## Returns +/// +/// - `Ok(modules)`: A list of discovered module names +/// - `Error(message)`: A human-readable error message (may contain multiple causes) +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover +/// +/// pub fn main() { +/// discover.tests("snippets/unit/**.gleam") +/// |> discover.list_modules() +/// } +/// ``` +pub fn list_modules( + discovery discovery: TestDiscovery, +) -> Result(List(String), String) { + let #(modules, errors) = discover_all_modules(discovery.patterns) + case errors { + [] -> Ok(modules) + _ -> Error(string.join(errors, "; ")) + } +} + +/// Load discovered suites and return both suites and errors. +/// +/// This never panics; discovery errors are returned in `LoadResult.errors`. +/// +/// ## Parameters +/// +/// - `discovery`: The configured discovery builder +/// +/// ## Returns +/// +/// A `LoadResult` with: +/// - `suites`: successfully loaded `TestSuite(Nil)` values +/// - `errors`: discovery or load errors (as strings) +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover +/// +/// pub fn main() { +/// discover.tests("snippets/unit/**.gleam") +/// |> discover.load() +/// } +/// ``` +pub fn load(discovery discovery: TestDiscovery) -> LoadResult { + let #(module_names, discover_errors) = + discover_all_modules(discovery.patterns) + let LoadResult(suites: suites, errors: load_errors) = + load_suites_from_modules(module_names, [], []) + + LoadResult(suites: suites, errors: list.append(discover_errors, load_errors)) +} + +/// Load discovered suites and return them as a list. +/// +/// Any discovery/load errors are converted into failing unit tests tagged with +/// `"discovery-error"`, so missing coverage is visible. 
+/// +/// ## Parameters +/// +/// - `discovery`: The configured discovery builder +/// +/// ## Returns +/// +/// A list of suites. If any errors occurred, an additional failing suite tagged +/// `"discovery-error"` is appended. +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover.{from_path, to_suites} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +/// import gleam/io +/// +/// pub fn main() { +/// let suites = +/// discover.new() +/// |> from_path("snippets/unit/**.gleam") +/// |> to_suites() +/// +/// runner.new(suites) +/// |> progress_reporter(progress.new()) +/// |> results_reporters([bdd.new()]) +/// |> exit_on_failure() +/// |> run() +/// } +/// ``` +pub fn to_suites(discovery discovery: TestDiscovery) -> List(TestSuite(Nil)) { + let LoadResult(suites: suites, errors: errors) = load(discovery) + + case list.is_empty(errors) { + True -> suites + False -> list.append(suites, [errors_suite(errors)]) + } +} + +/// Build a single suite from discovered suites. +/// +/// Any discovery/load errors are converted into failing unit tests tagged with +/// `"discovery-error"`. +/// +/// ## Parameters +/// +/// - `discovery`: The configured discovery builder +/// - `suite_name`: Name to use for the outer group in the combined suite +/// +/// ## Returns +/// +/// A single `TestSuite(Nil)` containing: +/// - all discovered suites, and +/// - any errors as failing tests tagged `"discovery-error"`. 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/discover +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner.{exit_on_failure, progress_reporter, results_reporters, run} +/// import gleam/io +/// +/// pub fn main() { +/// let suite = +/// discover.tests("snippets/unit/**.gleam") +/// |> discover.to_suite("discovered tests") +/// +/// runner.new([suite]) +/// |> progress_reporter(progress.new()) +/// |> results_reporters([bdd.new()]) +/// |> exit_on_failure() +/// |> run() +/// } +/// ``` +pub fn to_suite( + discovery discovery: TestDiscovery, + suite_name suite_name: String, +) -> TestSuite(Nil) { + let LoadResult(suites: suites, errors: errors) = load(discovery) + + let suite_nodes = suites_to_nodes(suites, []) + + let error_nodes = errors_to_nodes(errors, []) + + Root( + seed: Nil, + tree: Group( + name: suite_name, + tags: [], + children: list.append(suite_nodes, error_nodes), + ), + ) +} + +// ============================================================================ +// Internal helpers (no anonymous fns) +// ============================================================================ + +fn to_beam_glob(pattern: String) -> String { + // Convert a module-path glob to a beam filename glob: + // - "/" -> "@" + // - "**" -> "*" (module names are flat strings with "@" separators) + // - ".gleam" -> ".beam" (if present) + // - otherwise append ".beam" + let normalized = pattern |> string.trim() |> string.replace("/", "@") + let flattened = normalized |> string.replace("**", "*") + + case string.ends_with(flattened, ".gleam") { + True -> replace_gleam_extension(flattened) + False -> ensure_beam_extension(flattened) + } +} + +fn replace_gleam_extension(path: String) -> String { + path |> string.replace(".gleam", ".beam") +} + +fn ensure_beam_extension(path: String) -> String { + case string.ends_with(path, ".beam") { + True -> path + False -> path <> ".beam" + } +} + +fn 
load_suites_from_modules( + module_names: List(String), + suites_rev: List(TestSuite(Nil)), + errors_rev: List(String), +) -> LoadResult { + case module_names { + [] -> + LoadResult( + suites: list.reverse(suites_rev), + errors: list.reverse(errors_rev), + ) + + [module_name, ..rest] -> + load_suites_from_modules_next(module_name, rest, suites_rev, errors_rev) + } +} + +fn load_suites_from_modules_next( + module_name: String, + rest: List(String), + suites_rev: List(TestSuite(Nil)), + errors_rev: List(String), +) -> LoadResult { + case call_tests(module_name) { + Ok(suite) -> + load_suites_from_modules(rest, [suite, ..suites_rev], errors_rev) + Error(message) -> + load_suites_from_modules(rest, suites_rev, [ + format_load_error(module_name, message), + ..errors_rev + ]) + } +} + +fn format_load_error(module_name: String, message: String) -> String { + module_name <> ": " <> message +} + +fn format_discover_error(pattern: String, message: String) -> String { + pattern <> ": " <> message +} + +fn discover_all_modules(patterns: List(String)) -> #(List(String), List(String)) { + discover_all_modules_loop(patterns, [], [], []) +} + +fn discover_all_modules_loop( + patterns: List(String), + seen: List(String), + acc_rev: List(String), + errors_rev: List(String), +) -> #(List(String), List(String)) { + case patterns { + [] -> #(list.reverse(acc_rev), list.reverse(errors_rev)) + [pattern, ..rest] -> + discover_all_modules_loop_next(pattern, rest, seen, acc_rev, errors_rev) + } +} + +fn discover_all_modules_loop_next( + pattern: String, + rest: List(String), + seen: List(String), + acc_rev: List(String), + errors_rev: List(String), +) -> #(List(String), List(String)) { + let beam_glob = to_beam_glob(pattern) + case discover_test_modules(beam_glob) { + Ok(mods) -> { + let #(seen2, acc2) = add_unique_modules(mods, seen, acc_rev) + discover_all_modules_loop(rest, seen2, acc2, errors_rev) + } + Error(message) -> + discover_all_modules_loop(rest, seen, acc_rev, [ + 
format_discover_error(pattern, message), + ..errors_rev + ]) + } +} + +fn add_unique_modules( + modules: List(String), + seen: List(String), + acc_rev: List(String), +) -> #(List(String), List(String)) { + case modules { + [] -> #(seen, acc_rev) + [m, ..rest] -> add_unique_modules_next(m, rest, seen, acc_rev) + } +} + +fn add_unique_modules_next( + module_name: String, + rest: List(String), + seen: List(String), + acc_rev: List(String), +) -> #(List(String), List(String)) { + case list.contains(seen, module_name) { + True -> add_unique_modules(rest, seen, acc_rev) + False -> + add_unique_modules(rest, [module_name, ..seen], [module_name, ..acc_rev]) + } +} + +fn suites_to_nodes( + suites: List(TestSuite(Nil)), + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case suites { + [] -> list.reverse(acc_rev) + [suite, ..rest] -> suites_to_nodes(rest, [root_to_group(suite), ..acc_rev]) + } +} + +fn errors_to_nodes( + errors: List(String), + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case errors { + [] -> list.reverse(acc_rev) + [error, ..rest] -> errors_to_nodes(rest, [error_to_node(error), ..acc_rev]) + } +} + +fn errors_suite(errors: List(String)) -> TestSuite(Nil) { + Root( + seed: Nil, + tree: Group( + name: "Discovery Errors", + tags: ["discovery-error"], + children: errors_to_nodes(errors, []), + ), + ) +} + +fn root_to_group(suite: TestSuite(Nil)) -> Node(Nil) { + let Root(_seed, tree) = suite + tree +} + +fn error_to_node(error: String) -> Node(Nil) { + Test( + name: "Discovery Error: " <> error, + tags: ["discovery-error"], + kind: Unit, + run: discovery_error_run, + timeout_ms: None, + ) +} + +fn discovery_error_run(_nil: Nil) -> Result(AssertionResult, String) { + Ok(discovery_error_assertion()) +} + +fn discovery_error_assertion() -> AssertionResult { + AssertionFailed(AssertionFailure( + operator: "discover", + message: "Failed to discover/load test modules (see test name for details)", + payload: None, + )) +} + +// 
============================================================================ +// FFI +// ============================================================================ + +@external(erlang, "dream_test_test_discovery_ffi", "discover_test_modules") +fn discover_test_modules(beam_glob: String) -> Result(List(String), String) + +@external(erlang, "dream_test_test_discovery_ffi", "call_tests") +fn call_tests(module_name: String) -> Result(TestSuite(Nil), String) diff --git a/src/dream_test/file.gleam b/src/dream_test/file.gleam index 3b7eead..11b839a 100644 --- a/src/dream_test/file.gleam +++ b/src/dream_test/file.gleam @@ -1,61 +1,25 @@ //// File operations for dream_test. //// -//// This module provides file I/O operations for internal use by dream_test, -//// particularly for snapshot testing and Gherkin file parsing. It wraps -//// Erlang's file operations with proper error handling. +//// Dream Test uses this module internally for snapshot testing and Gherkin file +//// parsing. It wraps Erlang file operations with a small, structured error type +//// (`FileError`) so callers can decide how to handle failures. //// -//// ## Error Handling +//// ## Example //// -//// Unlike many file libraries that return opaque errors, this module provides -//// structured `FileError` types that tell you exactly what went wrong: +//// Use this in test code (for example inside a snippet `tests()` function). 
//// //// ```gleam -//// case file.read("config.json") { -//// Ok(content) -> parse(content) -//// Error(NotFound(_)) -> use_defaults() -//// Error(PermissionDenied(path)) -> panic as "Cannot read " <> path -//// Error(error) -> panic as file.error_to_string(error) -//// } -//// ``` -//// -//// ## Usage Examples -//// -//// ### Reading Files -//// -//// ```gleam -//// import dream_test/file -//// -//// case file.read("./test/fixtures/expected.json") { -//// Ok(content) -> content -//// Error(error) -> { -//// io.println("Error: " <> file.error_to_string(error)) -//// "" -//// } -//// } -//// ``` -//// -//// ### Writing Files +//// let path = tmp_path() //// -//// ```gleam -//// // Creates parent directories automatically -//// case file.write("./test/snapshots/output.snap", result) { -//// Ok(Nil) -> io.println("Saved!") -//// Error(NoSpace(_)) -> io.println("Disk full!") -//// Error(error) -> io.println(file.error_to_string(error)) -//// } -//// ``` -//// -//// ### Deleting Files -//// -//// ```gleam -//// // Safe to call even if file doesn't exist -//// let _ = file.delete("./test/snapshots/old.snap") +//// // Setup: create the file (no assertions during setup) +//// use _ <- result.try( +//// write(path, "hello") |> result.map_error(error_to_string), +//// ) //// -//// // Delete all snapshots -//// case file.delete_files_matching("./test/snapshots", ".snap") { -//// Ok(count) -> io.println("Deleted " <> int.to_string(count) <> " files") -//// Error(error) -> io.println(file.error_to_string(error)) -//// } +//// read(path) +//// |> should +//// |> be_equal(Ok("hello")) +//// |> or_fail_with("expected to read back written content") //// ``` // ============================================================================= @@ -80,19 +44,10 @@ /// ## Example /// /// ```gleam -/// case file.read("secret.txt") { -/// Ok(content) -> use(content) -/// Error(PermissionDenied(path)) -> { -/// io.println("Access denied: " <> path) -/// io.println("Try: chmod +r " <> path) 
-/// } -/// Error(NotFound(path)) -> { -/// io.println("File not found: " <> path) -/// } -/// Error(error) -> { -/// io.println(file.error_to_string(error)) -/// } -/// } +/// error_to_string(NotFound("/x")) +/// |> should +/// |> be_equal("File not found: /x") +/// |> or_fail_with("expected NotFound formatting") /// ``` /// pub type FileError { @@ -132,20 +87,24 @@ pub type FileError { /// Formats the error with both the error type and the affected path, /// suitable for logging or displaying to users. /// -/// ## Examples +/// ## Parameters /// -/// ```gleam -/// error_to_string(NotFound("/app/config.json")) -/// // -> "File not found: /app/config.json" +/// - `error`: the `FileError` value to format /// -/// error_to_string(PermissionDenied("/etc/shadow")) -/// // -> "Permission denied: /etc/shadow" +/// ## Returns +/// +/// A human-readable message string. This function is pure (no I/O). +/// +/// ## Examples /// -/// error_to_string(FileSystemError("/dev/null", "ebusy")) -/// // -> "File error (ebusy): /dev/null" +/// ```gleam +/// error_to_string(NotFound("/x")) +/// |> should +/// |> be_equal("File not found: /x") +/// |> or_fail_with("expected NotFound formatting") /// ``` /// -pub fn error_to_string(error: FileError) -> String { +pub fn error_to_string(error error: FileError) -> String { case error { NotFound(path) -> "File not found: " <> path PermissionDenied(path) -> "Permission denied: " <> path @@ -179,26 +138,21 @@ pub fn error_to_string(error: FileError) -> String { /// ## Examples /// /// ```gleam -/// // Read a configuration file -/// case file.read("gleam.toml") { -/// Ok(toml) -> parse_config(toml) -/// Error(NotFound(_)) -> default_config() -/// Error(error) -> panic as file.error_to_string(error) -/// } -/// ``` +/// let path = tmp_path() /// -/// ```gleam -/// // Read with full error handling -/// case file.read(path) { -/// Ok(content) -> Ok(content) -/// Error(NotFound(_)) -> Error("Config file missing") -/// Error(PermissionDenied(_)) -> 
Error("Cannot read config (permission denied)") -/// Error(error) -> Error(file.error_to_string(error)) -/// } +/// // Setup: create the file (no assertions during setup) +/// use _ <- result.try( +/// write(path, "hello") |> result.map_error(error_to_string), +/// ) +/// +/// read(path) +/// |> should +/// |> be_equal(Ok("hello")) +/// |> or_fail_with("expected to read back written content") /// ``` /// @external(erlang, "dream_test_file_ffi", "read_file") -pub fn read(path: String) -> Result(String, FileError) +pub fn read(path path: String) -> Result(String, FileError) /// Write a string to a file, creating parent directories if needed. /// @@ -221,21 +175,19 @@ pub fn read(path: String) -> Result(String, FileError) /// ## Examples /// /// ```gleam -/// // Write a snapshot file -/// case file.write("./test/snapshots/user.snap", json_output) { -/// Ok(Nil) -> io.println("Snapshot saved") -/// Error(error) -> io.println("Failed: " <> file.error_to_string(error)) -/// } -/// ``` +/// let path = tmp_path() /// -/// ```gleam -/// // Creates nested directories automatically -/// file.write("./deep/nested/path/file.txt", "content") -/// // Creates ./deep/nested/path/ if it doesn't exist +/// // Setup: create the file (no assertions during setup) +/// use _ <- result.try( +/// write(path, "hello") |> result.map_error(error_to_string), +/// ) /// ``` /// @external(erlang, "dream_test_file_ffi", "write_file") -pub fn write(path: String, content: String) -> Result(Nil, FileError) +pub fn write( + path path: String, + content content: String, +) -> Result(Nil, FileError) /// Delete a file. 
/// @@ -256,21 +208,22 @@ pub fn write(path: String, content: String) -> Result(Nil, FileError) /// ## Examples /// /// ```gleam -/// // Safe cleanup - doesn't fail if already deleted -/// let _ = file.delete("./test/temp/output.txt") -/// ``` -/// -/// ```gleam -/// // Delete with error handling -/// case file.delete(snapshot_path) { -/// Ok(Nil) -> io.println("Snapshot cleared") -/// Error(PermissionDenied(_)) -> io.println("Cannot delete (permission denied)") -/// Error(error) -> io.println(file.error_to_string(error)) -/// } +/// let path = tmp_path() +/// +/// // Setup: create the file, then delete it (no assertions during setup) +/// use _ <- result.try( +/// write(path, "hello") |> result.map_error(error_to_string), +/// ) +/// use _ <- result.try(delete(path) |> result.map_error(error_to_string)) +/// +/// read(path) +/// |> should +/// |> be_equal(Error(NotFound(path))) +/// |> or_fail_with("expected deleted file to be NotFound") /// ``` /// @external(erlang, "dream_test_file_ffi", "delete_file") -pub fn delete(path: String) -> Result(Nil, FileError) +pub fn delete(path path: String) -> Result(Nil, FileError) /// Delete all files in a directory that have a specific extension. 
/// @@ -286,22 +239,27 @@ pub fn delete(path: String) -> Result(Nil, FileError) /// ## Returns /// /// - `Ok(Int)` - Number of files deleted -/// - `Error(FileSystemError)` - Directory access failed +/// - `Error(FileError)` - Directory access failed (the specific variant depends on the underlying file system error) /// /// ## Examples /// /// ```gleam -/// // Clear all snapshot files -/// case file.delete_files_matching("./test/snapshots", ".snap") { -/// Ok(0) -> io.println("No snapshots to delete") -/// Ok(n) -> io.println("Deleted " <> int.to_string(n) <> " snapshots") -/// Error(error) -> io.println(file.error_to_string(error)) -/// } -/// ``` -/// -/// ```gleam -/// // Clean up temporary files before test run -/// let _ = file.delete_files_matching("./test/temp", ".tmp") +/// let directory = "./test/tmp/file_helpers_" <> int.to_string(unique_port()) +/// let a = directory <> "/a.snap" +/// let b = directory <> "/b.snap" +/// let keep = directory <> "/keep.txt" +/// +/// // Setup: create 2 matching files and 1 non-matching file +/// use _ <- result.try(write(a, "a") |> result.map_error(error_to_string)) +/// use _ <- result.try(write(b, "b") |> result.map_error(error_to_string)) +/// use _ <- result.try( +/// write(keep, "keep") |> result.map_error(error_to_string), +/// ) +/// +/// delete_files_matching(directory, ".snap") +/// |> should +/// |> be_equal(Ok(2)) +/// |> or_fail_with("expected two deleted snapshots") /// ``` /// /// ## Notes @@ -312,6 +270,6 @@ pub fn delete(path: String) -> Result(Nil, FileError) /// @external(erlang, "dream_test_file_ffi", "delete_files_matching") pub fn delete_files_matching( - directory: String, - extension: String, + directory directory: String, + extension extension: String, ) -> Result(Int, FileError) diff --git a/src/dream_test/gherkin/discover.gleam b/src/dream_test/gherkin/discover.gleam index e562dbe..db4a447 100644 --- a/src/dream_test/gherkin/discover.gleam +++ b/src/dream_test/gherkin/discover.gleam @@ -1,48 +1,71 
@@ -//// Feature discovery and loading for Gherkin tests. +//// Discover and load Gherkin `.feature` files. //// -//// Provides a builder pattern for discovering `.feature` files and -//// converting them to TestSuites without manual file parsing. +//// Use this module to: +//// - find `.feature` files via a glob pattern +//// - parse them (`load`) for inspection, or +//// - convert them into runnable `TestSuite`s (`to_suite`) when you provide a +//// step registry (your Given/When/Then handlers). //// //// ## Example //// //// ```gleam //// import dream_test/gherkin/discover -//// import dream_test/gherkin/steps.{new_registry, step} -//// import dream_test/runner +//// import dream_test/gherkin/steps.{type StepContext, get_int, step} +//// import dream_test/gherkin/world.{get_or, put} +//// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +//// import gleam/result //// -//// pub fn main() { -//// let steps = new_registry() -//// |> step("I have {int} items", have_items) -//// |> step("I add {int} items", add_items) +//// fn step_server_running(context: StepContext) { +//// put(context.world, "server_running", True) +//// Ok(succeed()) +//// } //// -//// discover.features("features/*.feature") -//// |> discover.with_registry(steps) -//// |> discover.to_suite("my_features") -//// |> runner.run_suite() +//// fn step_empty_cart(context: StepContext) { +//// put(context.world, "cart", 0) +//// Ok(succeed()) //// } -//// ``` //// -//// ## Glob Patterns +//// fn step_add_items(context: StepContext) { +//// let current = get_or(context.world, "cart", 0) +//// let to_add = get_int(context.captures, 0) |> result.unwrap(0) +//// put(context.world, "cart", current + to_add) +//// Ok(succeed()) +//// } //// -//// Uses Erlang's `filelib:wildcard/1` for pattern matching: +//// fn step_verify_count(context: StepContext) { +//// let expected = get_int(context.captures, 0) |> result.unwrap(0) +//// get_or(context.world, "cart", 0) +//// |> should +//// |> 
be_equal(expected) +//// |> or_fail_with("Cart count mismatch") +//// } //// -//// - `features/*.feature` — all `.feature` files in `features/` -//// - `test/**/*.feature` — recursive search in `test/` -//// - `*.feature` — all `.feature` files in current directory +//// pub fn tests() { +//// // Define step handlers +//// let steps = +//// steps.new() +//// |> step("the server is running", step_server_running) +//// |> step("the cart is empty", step_empty_cart) +//// |> step("I add {int} items", step_add_items) +//// |> step("the cart should have {int} items", step_verify_count) //// +//// // Discover and load all .feature files +//// discover.features("test/*.feature") +//// |> discover.with_registry(steps) +//// |> discover.to_suite("cart_features") +//// } +//// ``` import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} import dream_test/gherkin/parser import dream_test/gherkin/steps.{type StepRegistry} -import dream_test/gherkin/types.{type Feature} +import dream_test/gherkin/types as gherkin_types import dream_test/types.{ - type AssertionResult, type TestSuite, type TestSuiteItem, AssertionFailed, - AssertionFailure, SingleTestConfig, SuiteGroup, SuiteTest, TestCase, TestSuite, - Unit, -} as _dream_types + type AssertionResult, type Node, type TestSuite, AssertionFailed, + AssertionFailure, Group, Root, Test, Unit, +} import gleam/list import gleam/option.{type Option, None, Some} -import gleam/result // ============================================================================ // Types @@ -50,9 +73,10 @@ import gleam/result /// Builder for discovering and loading feature files. /// -/// Use `features()` to create, then chain with `with_registry()` and -/// `to_suite()` to build a TestSuite. +/// Use `features()` to create a discovery, then chain with `with_registry()` and +/// `to_suite()` to build a runnable `TestSuite`. /// +/// This is opaque so callers can’t depend on internal fields that may change. 
pub opaque type FeatureDiscovery { FeatureDiscovery( /// Glob pattern for finding feature files @@ -60,7 +84,7 @@ pub opaque type FeatureDiscovery { /// Step registry (set via with_registry) registry: Option(StepRegistry), /// Parsed features (populated during to_suite) - features: List(Feature), + features: List(gherkin_types.Feature), /// Parse errors encountered errors: List(String), ) @@ -68,13 +92,15 @@ pub opaque type FeatureDiscovery { /// Result of loading features, containing both successes and errors. /// +/// This is useful if you want to control how parse errors are handled rather +/// than converting them to failing tests. +/// +/// ## Fields +/// +/// - `features`: successfully parsed feature values +/// - `errors`: parse errors as strings (typically `"path: message"`) pub type LoadResult { - LoadResult( - /// Successfully parsed features - features: List(Feature), - /// Errors encountered during parsing - errors: List(String), - ) + LoadResult(features: List(gherkin_types.Feature), errors: List(String)) } // ============================================================================ @@ -83,111 +109,169 @@ pub type LoadResult { /// Start discovering features matching a glob pattern. /// -/// ## Parameters -/// -/// - `pattern`: Glob pattern for `.feature` files +/// Use Erlang’s `filelib:wildcard/1` semantics (see `wildcard/1` below). /// /// ## Example /// /// ```gleam -/// discover.features("features/**/*.feature") -/// |> discover.with_registry(steps) -/// |> discover.to_suite("my_tests") +/// discover.features("test/*.feature") /// ``` /// -pub fn features(pattern: String) -> FeatureDiscovery { +/// ## Parameters +/// +/// - `pattern`: glob pattern used to find `.feature` files +/// +/// ## Returns +/// +/// A `FeatureDiscovery` builder you can pipe into `with_registry`, `load`, +/// `list_files`, or `to_suite`. 
+pub fn features(pattern pattern: String) -> FeatureDiscovery { FeatureDiscovery(pattern: pattern, registry: None, features: [], errors: []) } /// Attach a step registry to the discovery. /// -/// The registry contains all step definitions needed to execute the features. +/// The step registry is the set of step definitions (Given/When/Then handlers) +/// used to execute scenarios. It is required before calling `to_suite`. +/// +/// ## Example +/// +/// ```gleam +/// // Define step handlers +/// let steps = +/// steps.new() +/// |> step("the server is running", step_server_running) +/// |> step("the cart is empty", step_empty_cart) +/// |> step("I add {int} items", step_add_items) +/// |> step("the cart should have {int} items", step_verify_count) +/// +/// // Discover and load all .feature files +/// discover.features("test/*.feature") +/// |> discover.with_registry(steps) +/// |> discover.to_suite("cart_features") +/// ``` /// /// ## Parameters /// -/// - `discovery`: The feature discovery builder -/// - `registry`: Step registry with step definitions +/// - `discovery`: a `FeatureDiscovery` created with `features` +/// - `registry`: step definitions used to execute scenarios /// +/// ## Returns +/// +/// The updated discovery. pub fn with_registry( - discovery: FeatureDiscovery, - registry: StepRegistry, + discovery discovery: FeatureDiscovery, + registry registry: StepRegistry, ) -> FeatureDiscovery { FeatureDiscovery(..discovery, registry: Some(registry)) } /// Build a TestSuite from discovered features. /// -/// Discovers all matching files, parses them, and creates a combined TestSuite. -/// Parse errors are collected but don't prevent other features from running. +/// Panics if `with_registry()` was not called. /// -/// ## Parameters +/// Parse errors are converted into failing unit tests tagged with +/// `"parse-error"`. 
/// -/// - `discovery`: The configured feature discovery -/// - `suite_name`: Name for the combined test suite +/// ## What does this produce? /// -/// ## Returns +/// The returned suite contains one test per scenario. Each test runs the +/// scenario’s steps using the provided step registry. +/// +/// ## Example /// -/// A TestSuite containing all successfully parsed features. -/// If there are parse errors, they're reported as failed tests. +/// ```gleam +/// // Discover and load all .feature files +/// discover.features("test/*.feature") +/// |> discover.with_registry(steps) +/// |> discover.to_suite("cart_features") +/// ``` /// -/// ## Panics +/// ## Parameters /// -/// Panics if `with_registry()` was not called. +/// - `discovery`: a `FeatureDiscovery` with a registry attached via `with_registry` +/// - `suite_name`: name to show for the top-level suite/group in reports /// -pub fn to_suite(discovery: FeatureDiscovery, suite_name: String) -> TestSuite { +/// ## Returns +/// +/// A `TestSuite(Nil)` containing one test per discovered scenario, plus failing +/// tests for any parse errors (tagged `"parse-error"`). +pub fn to_suite( + discovery discovery: FeatureDiscovery, + suite_name suite_name: String, +) -> TestSuite(Nil) { let registry = case discovery.registry { Some(r) -> r None -> panic as "FeatureDiscovery requires a registry. Call with_registry() first." 
} - // Discover and parse all matching files let files = discover_files(discovery.pattern) let load_result = load_all_features(files) - // Convert each feature to a TestSuite, then combine as nested groups - let suite_items = - list.map(load_result.features, fn(feature) { - let config = FeatureConfig(feature: feature, step_registry: registry) - let feature_suite = to_test_suite(suite_name, config) - SuiteGroup(feature_suite) - }) - - // Add error items for any parse failures - let error_items = list.map(load_result.errors, error_to_suite_item) - let all_items = list.append(suite_items, error_items) - - TestSuite( - name: suite_name, - full_name: [suite_name], - before_all_hooks: [], - after_all_hooks: [], - items: all_items, + let children = features_to_groups(load_result.features, registry, []) + let error_nodes = errors_to_nodes(load_result.errors, []) + Root( + seed: Nil, + tree: Group( + name: suite_name, + tags: [], + children: list.append(children, error_nodes), + ), ) } /// Load features and return detailed results. /// -/// Use this when you need access to parse errors for custom handling. +/// This does **not** require a step registry because it only discovers files +/// and parses Gherkin syntax. Step definitions are only needed when you want to +/// execute scenarios (`to_suite`). +/// +/// ## Example +/// +/// ```gleam +/// let result = discover.features("test/*.feature") |> discover.load() +/// +/// result.features +/// |> should +/// |> have_length(1) +/// |> or_fail_with("expected one parsed feature") +/// ``` /// /// ## Parameters /// -/// - `discovery`: The feature discovery builder +/// - `discovery`: a `FeatureDiscovery` created with `features` /// /// ## Returns /// -/// LoadResult with lists of successfully parsed features and errors. -/// -pub fn load(discovery: FeatureDiscovery) -> LoadResult { +/// A `LoadResult` containing parsed features and any parse errors. 
+pub fn load(discovery discovery: FeatureDiscovery) -> LoadResult { let files = discover_files(discovery.pattern) load_all_features(files) } /// Get the list of files matching the discovery pattern. /// -/// Useful for debugging or custom file handling. +/// This is a pure discovery step; files are not parsed. +/// +/// ## Example +/// +/// ```gleam +/// discover.features("test/*.feature") +/// |> discover.list_files() +/// |> should +/// |> contain("test/cart.feature") +/// |> or_fail_with("expected list_files to include test/cart.feature") +/// ``` +/// +/// ## Parameters +/// +/// - `discovery`: a `FeatureDiscovery` created with `features` +/// +/// ## Returns /// -pub fn list_files(discovery: FeatureDiscovery) -> List(String) { +/// A list of file paths matching the glob pattern. +pub fn list_files(discovery discovery: FeatureDiscovery) -> List(String) { discover_files(discovery.pattern) } @@ -200,51 +284,53 @@ fn discover_files(pattern: String) -> List(String) { } fn load_all_features(files: List(String)) -> LoadResult { - let results = list.map(files, parse_feature_file) - let features = - results - |> list.filter_map(fn(r) { - case r { - Ok(f) -> Ok(f) - Error(_) -> Error(Nil) - } - }) - let errors = - results - |> list.filter_map(fn(r) { - case r { - Ok(_) -> Error(Nil) - Error(e) -> Ok(e) + load_all_features_loop(files, [], []) +} + +fn load_all_features_loop( + files: List(String), + features_rev: List(gherkin_types.Feature), + errors_rev: List(String), +) -> LoadResult { + case files { + [] -> + LoadResult( + features: list.reverse(features_rev), + errors: list.reverse(errors_rev), + ) + [path, ..rest] -> { + case parse_feature_file(path) { + Ok(feature) -> + load_all_features_loop(rest, [feature, ..features_rev], errors_rev) + Error(error) -> + load_all_features_loop(rest, features_rev, [error, ..errors_rev]) } - }) - LoadResult(features: features, errors: errors) + } + } +} + +fn parse_feature_file(path: String) -> Result(gherkin_types.Feature, String) 
{ + case parser.parse_file(path) { + Ok(feature) -> Ok(feature) + Error(e) -> Error(path <> ": " <> e) + } } -fn parse_feature_file(path: String) -> Result(Feature, String) { - parser.parse_file(path) - |> result.map_error(fn(e) { path <> ": " <> e }) +fn error_to_node(error: String) -> Node(Nil) { + Test( + name: "Parse Error: " <> error, + tags: ["parse-error"], + kind: Unit, + run: parse_error_test_run, + timeout_ms: None, + ) } -fn error_to_suite_item(error: String) -> TestSuiteItem { - // Create a failing test case for the parse error - // The error message is in the test name since we can't use closures - let error_test = - TestCase( - SingleTestConfig( - name: "Parse Error: " <> error, - full_name: ["Parse Error", error], - tags: ["parse-error"], - kind: Unit, - run: parse_error_runner, - timeout_ms: None, - before_each_hooks: [], - after_each_hooks: [], - ), - ) - SuiteTest(error_test) +fn parse_error_test_run(_nil: Nil) -> Result(AssertionResult, String) { + Ok(parse_error_assertion()) } -fn parse_error_runner() -> AssertionResult { +fn parse_error_assertion() -> AssertionResult { AssertionFailed(AssertionFailure( operator: "parse", message: "Failed to parse feature file (see test name for details)", @@ -252,6 +338,44 @@ fn parse_error_runner() -> AssertionResult { )) } +fn features_to_groups( + features: List(gherkin_types.Feature), + registry: StepRegistry, + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case features { + [] -> list.reverse(acc_rev) + [feature, ..rest] -> { + let group = feature_to_group(feature, registry) + features_to_groups(rest, registry, [group, ..acc_rev]) + } + } +} + +fn feature_to_group( + feature: gherkin_types.Feature, + registry: StepRegistry, +) -> Node(Nil) { + let config = FeatureConfig(feature: feature, step_registry: registry) + let feature_suite = to_test_suite(config) + root_to_group(feature_suite) +} + +fn errors_to_nodes( + errors: List(String), + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case errors 
{ + [] -> list.reverse(acc_rev) + [error, ..rest] -> errors_to_nodes(rest, [error_to_node(error), ..acc_rev]) + } +} + +fn root_to_group(suite: TestSuite(Nil)) -> Node(Nil) { + let Root(_seed, tree) = suite + tree +} + // ============================================================================ // FFI // ============================================================================ diff --git a/src/dream_test/gherkin/feature.gleam b/src/dream_test/gherkin/feature.gleam index 7bdc6a6..7631c1f 100644 --- a/src/dream_test/gherkin/feature.gleam +++ b/src/dream_test/gherkin/feature.gleam @@ -1,63 +1,61 @@ -//// Feature execution and TestSuite conversion for Gherkin tests. +//// Convert Gherkin features into runnable `TestSuite`s. //// -//// This module converts parsed Gherkin features to dream_test TestSuites -//// and provides an inline DSL for defining features directly in Gleam. +//// This module does two related jobs: +//// - **Execute parsed `.feature` files**: turn a parsed `gherkin/types.Feature` +//// into a `TestSuite` using a step registry (your Given/When/Then handlers). +//// - **Provide an inline DSL**: build features directly in Gleam when you don’t +//// want to keep `.feature` files on disk. //// -//// ## Two Approaches -//// -//// 1. **File-based**: Parse `.feature` files with standard Gherkin syntax -//// 2. **Inline DSL**: Define features directly in Gleam code -//// -//// Both approaches share the same step definitions and execution engine. 
-//// -//// ## File-Based Usage -//// -//// Parse a `.feature` file and convert to TestSuite: +//// ## Example (file-based) //// //// ```gleam -//// import dream_test/gherkin/feature.{FeatureConfig, to_test_suite} -//// import dream_test/gherkin/parser -//// import dream_test/gherkin/steps.{new_registry, given, when_, then_} -//// import dream_test/runner +//// pub fn tests() { +//// // Define step handlers +//// let steps = +//// steps.new() +//// |> step("the server is running", step_server_running) +//// |> step("the cart is empty", step_empty_cart) +//// |> step("I add {int} items", step_add_items) +//// |> step("the cart should have {int} items", step_verify_count) //// -//// pub fn main() { -//// let steps = new_registry() -//// |> given("I have {int} items", have_items) -//// |> when_("I add {int} items", add_items) -//// |> then_("the total is ${float}", check_total) +//// // Parse the .feature file +//// let assert Ok(feature) = parser.parse_file("test/cart.feature") //// -//// let assert Ok(parsed) = parser.parse_file("features/cart.feature") -//// let config = FeatureConfig(feature: parsed, step_registry: steps) -//// -//// to_test_suite("cart_test", config) -//// |> runner.run_suite() +//// // Convert to TestSuite and run +//// let config = FeatureConfig(feature: feature, step_registry: steps) +//// to_test_suite(config) //// } //// ``` //// -//// ## Inline DSL Usage -//// -//// Define features directly in Gleam without `.feature` files: +//// ## Example (inline DSL) //// //// ```gleam -//// import dream_test/gherkin/feature.{ -//// feature, scenario, given, when, then, and, with_tags, -//// } +//// pub fn tests() { +//// let steps = +//// steps.new() +//// |> step("the server is running", step_server_running) +//// |> step("the cart is empty", step_empty_cart) +//// |> step("I add {int} items", step_add_items) +//// |> step("the cart should have {int} items", step_verify_count) //// -//// pub fn tests() -> TestSuite { -//// let steps = cart_steps() 
-//// -//// feature("Shopping Cart", steps, [ +//// let bg = background([given("the server is running")]) +//// +//// feature_with_background("Shopping Cart", steps, bg, [ //// scenario("Adding items", [ -//// given("I have an empty cart"), -//// when("I add 2 apples to the cart"), -//// then("the cart should contain 2 items"), -//// and("the total should be $3.00"), +//// given("the cart is empty"), +//// when("I add 3 items"), +//// then("the cart should have 3 items"), //// ]) -//// |> with_tags(["happy-path"]), +//// |> with_tags(["smoke"]), +//// scenario("Adding more items", [ +//// given("the cart is empty"), +//// when("I add 2 items"), +//// and("I add 3 items"), +//// then("the cart should have 5 items"), +//// ]), //// ]) //// } //// ``` -//// import dream_test/gherkin/step_trie.{type StepMatch} import dream_test/gherkin/steps.{ @@ -66,9 +64,8 @@ import dream_test/gherkin/steps.{ import dream_test/gherkin/types as gherkin_types import dream_test/gherkin/world.{type World} import dream_test/types.{ - type AssertionResult, type TestCase, type TestSuite, type TestSuiteItem, - AssertionFailed, AssertionFailure, AssertionOk, GherkinScenario, - SingleTestConfig, SuiteGroup, SuiteTest, TestCase, TestSuite, + type AssertionResult, type Node, type TestSuite, AssertionFailed, + AssertionFailure, AssertionOk, GherkinScenario, Group, Root, Test, } import gleam/dict.{type Dict} import gleam/int @@ -131,78 +128,72 @@ pub type InlineStep { /// /// ## Parameters /// -/// - `module_name`: Name for the suite (usually the test module name) /// - `config`: FeatureConfig with feature and step registry /// /// ## Returns /// -/// A TestSuite that can be run with `runner.run_suite()` +/// A TestSuite that can be run with `runner.new([suite]) |> runner.run()`. 
+/// +/// ## Example /// -pub fn to_test_suite(module_name: String, config: FeatureConfig) -> TestSuite { +/// ```gleam +/// let config = FeatureConfig(feature: feature, step_registry: steps) +/// to_test_suite(config) +/// ``` +pub fn to_test_suite(config config: FeatureConfig) -> TestSuite(Nil) { let feature = config.feature - let items = build_suite_items(feature, config) - - TestSuite( - name: feature.name, - full_name: [module_name, feature.name], - before_all_hooks: [], - after_all_hooks: [], - items: items, + let children = build_suite_items(feature, config) + + Root( + seed: Nil, + tree: Group(name: feature.name, tags: feature.tags, children: children), ) } -/// Convert a Feature to a flat list of TestCases. -/// -/// Unlike `to_test_suite`, this flattens the feature to a simple list. -/// Use this when you don't need before_all/after_all hooks. -/// -/// ## Parameters -/// -/// - `module_name`: Name prefix for test paths -/// - `config`: FeatureConfig with feature and step registry -/// -/// ## Returns -/// -/// A list of TestCases that can be run with `runner.run_all()` -/// -pub fn to_test_cases( - module_name: String, +fn build_suite_items( + feature: gherkin_types.Feature, config: FeatureConfig, -) -> List(TestCase) { - let suite = to_test_suite(module_name, config) - flatten_suite(suite) -} - -fn flatten_suite(suite: TestSuite) -> List(TestCase) { - list.flat_map(suite.items, flatten_item) +) -> List(Node(Nil)) { + build_suite_items_loop(feature, feature.scenarios, config, []) } -fn flatten_item(item: TestSuiteItem) -> List(TestCase) { - case item { - SuiteTest(test_case) -> [test_case] - SuiteGroup(nested_suite) -> flatten_suite(nested_suite) +fn build_suite_items_loop( + feature: gherkin_types.Feature, + scenarios: List(gherkin_types.Scenario), + config: FeatureConfig, + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case scenarios { + [] -> list.reverse(acc_rev) + [scenario, ..rest] -> { + let items = scenario_to_suite_items(feature, scenario, 
config) + build_suite_items_loop( + feature, + rest, + config, + reverse_append(items, acc_rev), + ) + } } } -fn build_suite_items( - feature: gherkin_types.Feature, - config: FeatureConfig, -) -> List(TestSuiteItem) { - list.flat_map(feature.scenarios, fn(scenario) { - scenario_to_suite_items(feature, scenario, config) - }) +fn reverse_append(items: List(a), acc: List(a)) -> List(a) { + case items { + [] -> acc + [item, ..rest] -> reverse_append(rest, [item, ..acc]) + } } fn scenario_to_suite_items( feature: gherkin_types.Feature, scenario: gherkin_types.Scenario, config: FeatureConfig, -) -> List(TestSuiteItem) { +) -> List(Node(Nil)) { case scenario { gherkin_types.Scenario(name, tags, steps) -> { - let test_case = - build_scenario_test_case(feature, name, tags, steps, config, None) - [SuiteTest(test_case)] + let test_node = + build_scenario_test_node(feature, name, tags, steps, config, None) + [test_node] } gherkin_types.ScenarioOutline(name, tags, steps, examples) -> { expand_scenario_outline(feature, name, tags, steps, examples, config) @@ -210,14 +201,14 @@ fn scenario_to_suite_items( } } -fn build_scenario_test_case( +fn build_scenario_test_node( feature: gherkin_types.Feature, scenario_name: String, scenario_tags: List(String), steps: List(gherkin_types.Step), config: FeatureConfig, example_suffix: Option(String), -) -> TestCase { +) -> Node(Nil) { let full_name = build_full_name(feature.name, scenario_name, example_suffix) let scenario_id = string.join(full_name, "::") let all_tags = list.append(feature.tags, scenario_tags) @@ -228,23 +219,15 @@ fn build_scenario_test_case( list.append(background_steps, steps) None -> steps } - - let run_fn = - build_scenario_runner(scenario_id, all_steps, config.step_registry) - - let single_config = - SingleTestConfig( - name: scenario_name, - full_name: full_name, - tags: all_tags, - kind: GherkinScenario(scenario_id), - run: run_fn, - timeout_ms: None, - before_each_hooks: [], - after_each_hooks: [], - ) - - 
TestCase(single_config) + Test( + name: scenario_name, + tags: all_tags, + kind: GherkinScenario(scenario_id), + run: fn(_nil: Nil) { + Ok(execute_scenario(scenario_id, all_steps, config.step_registry)) + }, + timeout_ms: None, + ) } fn build_full_name( @@ -265,43 +248,103 @@ fn expand_scenario_outline( steps: List(gherkin_types.Step), examples: gherkin_types.ExamplesTable, config: FeatureConfig, -) -> List(TestSuiteItem) { +) -> List(Node(Nil)) { let headers = examples.headers - list.index_map(examples.rows, fn(row, index) { - let substitutions = build_substitution_map(headers, row) - let expanded_steps = substitute_steps(steps, substitutions) - let suffix = "(Example " <> int.to_string(index + 1) <> ")" + expand_scenario_outline_rows_loop( + feature, + name, + tags, + steps, + config, + headers, + examples.rows, + 0, + [], + ) +} - let test_case = - build_scenario_test_case( +fn expand_scenario_outline_rows_loop( + feature: gherkin_types.Feature, + name: String, + tags: List(String), + steps: List(gherkin_types.Step), + config: FeatureConfig, + headers: List(String), + rows: List(List(String)), + index: Int, + acc_rev: List(Node(Nil)), +) -> List(Node(Nil)) { + case rows { + [] -> list.reverse(acc_rev) + [row, ..rest] -> { + let substitutions = build_substitution_map(headers, row) + let expanded_steps = substitute_steps(steps, substitutions) + let suffix = "(Example " <> int.to_string(index + 1) <> ")" + + let node = + build_scenario_test_node( + feature, + name, + tags, + expanded_steps, + config, + Some(suffix), + ) + + expand_scenario_outline_rows_loop( feature, name, tags, - expanded_steps, + steps, config, - Some(suffix), + headers, + rest, + index + 1, + [node, ..acc_rev], ) - SuiteTest(test_case) - }) + } + } } fn build_substitution_map( headers: List(String), values: List(String), ) -> Dict(String, String) { - list.zip(headers, values) - |> list.fold(dict.new(), fn(acc, pair) { - let #(header, value) = pair - dict.insert(acc, header, value) - }) + 
build_substitution_map_loop(list.zip(headers, values), dict.new()) +} + +fn build_substitution_map_loop( + pairs: List(#(String, String)), + acc: Dict(String, String), +) -> Dict(String, String) { + case pairs { + [] -> acc + [#(header, value), ..rest] -> + build_substitution_map_loop(rest, dict.insert(acc, header, value)) + } } fn substitute_steps( steps: List(gherkin_types.Step), substitutions: Dict(String, String), ) -> List(gherkin_types.Step) { - list.map(steps, fn(step) { substitute_step(step, substitutions) }) + substitute_steps_loop(steps, substitutions, []) +} + +fn substitute_steps_loop( + steps: List(gherkin_types.Step), + substitutions: Dict(String, String), + acc_rev: List(gherkin_types.Step), +) -> List(gherkin_types.Step) { + case steps { + [] -> list.reverse(acc_rev) + [step, ..rest] -> + substitute_steps_loop(rest, substitutions, [ + substitute_step(step, substitutions), + ..acc_rev + ]) + } } fn substitute_step( @@ -320,24 +363,27 @@ fn substitute_placeholders( text: String, substitutions: Dict(String, String), ) -> String { - dict.fold(substitutions, text, fn(acc, header, value) { - let placeholder = "<" <> header <> ">" - string.replace(acc, placeholder, value) - }) + substitute_placeholders_loop(dict.to_list(substitutions), text) +} + +fn substitute_placeholders_loop( + pairs: List(#(String, String)), + acc: String, +) -> String { + case pairs { + [] -> acc + [#(header, value), ..rest] -> { + let placeholder = "<" <> header <> ">" + let next = string.replace(acc, placeholder, value) + substitute_placeholders_loop(rest, next) + } + } } // ============================================================================ // Scenario Execution // ============================================================================ -fn build_scenario_runner( - scenario_id: String, - steps: List(gherkin_types.Step), - registry: StepRegistry, -) -> fn() -> AssertionResult { - fn() { execute_scenario(scenario_id, steps, registry) } -} - fn execute_scenario( 
scenario_id: String, steps: List(gherkin_types.Step), @@ -390,7 +436,15 @@ fn execute_step( case steps.find_step(registry, effective_keyword, step.text) { Ok(match) -> { let context = build_step_context(match, step, the_world) - match.handler(context) + case match.handler(context) { + Ok(result) -> result + Error(message) -> + AssertionFailed(AssertionFailure( + operator: "step", + message: message, + payload: None, + )) + } } Error(msg) -> { AssertionFailed(AssertionFailure( @@ -451,17 +505,31 @@ fn extract_doc_string_from_step(step: gherkin_types.Step) -> Option(String) { /// /// ## Returns /// -/// A TestSuite that can be run with `runner.run_suite()` +/// A TestSuite that can be run with `runner.new([suite]) |> runner.run()`. /// /// ## Example /// /// ```gleam +/// let steps = +/// steps.new() +/// |> step("I have {int} items in my cart", step_have_items) +/// |> step("I add {int} more items", step_add_items) +/// |> step("I should have {int} items total", step_should_have) +/// /// feature("Shopping Cart", steps, [ +/// scenario("Adding items to cart", [ +/// given("I have 3 items in my cart"), +/// when("I add 2 more items"), +/// then("I should have 5 items total"), +/// but("I should have 5 items total"), +/// ]), +/// ]) +/// ``` pub fn feature( - name: String, - registry: StepRegistry, - scenarios: List(InlineScenario), -) -> TestSuite { + name name: String, + registry registry: StepRegistry, + scenarios scenarios: List(InlineScenario), +) -> TestSuite(Nil) { let parsed_scenarios = list.map(scenarios, inline_to_parsed_scenario) let parsed_feature = gherkin_types.Feature( @@ -473,7 +541,7 @@ pub fn feature( ) let config = FeatureConfig(feature: parsed_feature, step_registry: registry) - to_test_suite(name <> "_test", config) + to_test_suite(config) } /// Define an inline scenario. 
@@ -486,14 +554,18 @@ pub fn feature( /// ## Example /// /// ```gleam -/// scenario("Adding items", [ -/// given("I have an empty cart"), -/// when_step("I add 5 items"), -/// then_step("I should have 5 items"), -/// ]) +/// scenario("Adding items to cart", [ +/// given("I have 3 items in my cart"), +/// when("I add 2 more items"), +/// then("I should have 5 items total"), +/// but("I should have 5 items total"), +/// ]), /// ``` /// -pub fn scenario(name: String, inline_steps: List(InlineStep)) -> InlineScenario { +pub fn scenario( + name name: String, + inline_steps inline_steps: List(InlineStep), +) -> InlineScenario { InlineScenario(name: name, steps: inline_steps, tags: []) } @@ -503,10 +575,11 @@ pub fn scenario(name: String, inline_steps: List(InlineStep)) -> InlineScenario /// /// ```gleam /// scenario("Adding items", [ -/// when("I add 2 apples to the cart"), -/// then("the cart should contain 2 items"), -/// ]) -/// |> with_tags(["happy-path", "smoke"]) +/// given("the cart is empty"), +/// when("I add 3 items"), +/// then("the cart should have 3 items"), +/// ]) +/// |> with_tags(["smoke"]), /// ``` /// /// ## Note @@ -515,8 +588,8 @@ pub fn scenario(name: String, inline_steps: List(InlineStep)) -> InlineScenario /// `dream_test/unit.with_tags` instead. 
/// pub fn with_tags( - inline_scenario: InlineScenario, - tags: List(String), + inline_scenario inline_scenario: InlineScenario, + tags tags: List(String), ) -> InlineScenario { InlineScenario(..inline_scenario, tags: tags) } @@ -526,10 +599,10 @@ pub fn with_tags( /// ## Example /// /// ```gleam -/// given("I have {int} items in my cart") +/// given("the cart is empty"), /// ``` /// -pub fn given(text: String) -> InlineStep { +pub fn given(text text: String) -> InlineStep { InlineStep(keyword: "Given", text: text) } @@ -538,10 +611,10 @@ pub fn given(text: String) -> InlineStep { /// ## Example /// /// ```gleam -/// when("I add {int} items") +/// when("I add 3 items"), /// ``` /// -pub fn when(text: String) -> InlineStep { +pub fn when(text text: String) -> InlineStep { InlineStep(keyword: "When", text: text) } @@ -550,10 +623,10 @@ pub fn when(text: String) -> InlineStep { /// ## Example /// /// ```gleam -/// then("I should have {int} items") +/// then("the cart should have 3 items"), /// ``` /// -pub fn then(text: String) -> InlineStep { +pub fn then(text text: String) -> InlineStep { InlineStep(keyword: "Then", text: text) } @@ -562,10 +635,10 @@ pub fn then(text: String) -> InlineStep { /// ## Example /// /// ```gleam -/// and("I have a coupon") +/// and("I add 3 items"), /// ``` /// -pub fn and(text: String) -> InlineStep { +pub fn and(text text: String) -> InlineStep { InlineStep(keyword: "And", text: text) } @@ -574,10 +647,10 @@ pub fn and(text: String) -> InlineStep { /// ## Example /// /// ```gleam -/// but("I should not see errors") +/// but("I should have 5 items total"), /// ``` /// -pub fn but(text: String) -> InlineStep { +pub fn but(text text: String) -> InlineStep { InlineStep(keyword: "But", text: text) } @@ -617,15 +690,12 @@ fn parse_keyword(keyword_str: String) -> gherkin_types.StepKeyword { /// ## Example /// /// ```gleam -/// let bg = background([ -/// given("I am logged in"), -/// given("I have an empty cart"), -/// ]) -/// -/// 
feature_with_background("Shopping", steps, bg, [...scenarios...]) +/// let bg = background([given("the server is running")]) /// ``` /// -pub fn background(inline_steps: List(InlineStep)) -> List(gherkin_types.Step) { +pub fn background( + inline_steps inline_steps: List(InlineStep), +) -> List(gherkin_types.Step) { list.map(inline_steps, inline_to_parsed_step) } @@ -638,12 +708,32 @@ pub fn background(inline_steps: List(InlineStep)) -> List(gherkin_types.Step) { /// - `background_steps`: Steps to run before each scenario /// - `scenarios`: List of inline scenarios /// +/// ## Example +/// +/// ```gleam +/// let bg = background([given("the server is running")]) +/// +/// feature_with_background("Shopping Cart", steps, bg, [ +/// scenario("Adding items", [ +/// given("the cart is empty"), +/// when("I add 3 items"), +/// then("the cart should have 3 items"), +/// ]) +/// |> with_tags(["smoke"]), +/// scenario("Adding more items", [ +/// given("the cart is empty"), +/// when("I add 2 items"), +/// and("I add 3 items"), +/// then("the cart should have 5 items"), +/// ]), +/// ]) +/// ``` pub fn feature_with_background( - name: String, - registry: StepRegistry, - background_steps: List(gherkin_types.Step), - scenarios: List(InlineScenario), -) -> TestSuite { + name name: String, + registry registry: StepRegistry, + background_steps background_steps: List(gherkin_types.Step), + scenarios scenarios: List(InlineScenario), +) -> TestSuite(Nil) { let parsed_scenarios = list.map(scenarios, inline_to_parsed_scenario) let parsed_feature = gherkin_types.Feature( @@ -655,5 +745,5 @@ pub fn feature_with_background( ) let config = FeatureConfig(feature: parsed_feature, step_registry: registry) - to_test_suite(name <> "_test", config) + to_test_suite(config) } diff --git a/src/dream_test/gherkin/parser.gleam b/src/dream_test/gherkin/parser.gleam index 93a6936..d827269 100644 --- a/src/dream_test/gherkin/parser.gleam +++ b/src/dream_test/gherkin/parser.gleam @@ -1,37 +1,12 @@ -//// 
Gherkin parser for `.feature` files. +//// Parse Gherkin `.feature` files into `gherkin/types.Feature`. //// -//// Parses Gherkin syntax into structured Feature types that can be -//// converted to dream_test TestSuites. +//// Use this when you have Gherkin text (from a file on disk, a fixture, or a +//// string literal) and you want a structured representation you can convert +//// into runnable tests via `dream_test/gherkin/feature`. //// -//// ## Supported Syntax -//// -//// - Feature, Scenario, Scenario Outline -//// - Background -//// - Given/When/Then/And/But steps -//// - Tags (@tag syntax) -//// - DataTables (pipe-delimited) -//// - DocStrings (triple quotes) -//// - Examples tables for Scenario Outlines -//// - Comments (# lines) -//// -//// ## Example Usage -//// -//// ```gleam -//// import dream_test/gherkin/parser -//// -//// // Parse from file -//// case parser.parse_file("test/features/shopping.feature") { -//// Ok(feature) -> run_feature(feature) -//// Error(msg) -> panic as msg -//// } -//// -//// // Parse from string -//// let content = "Feature: My Feature\n Scenario: Test\n Given something" -//// case parser.parse_string(content) { -//// Ok(feature) -> run_feature(feature) -//// Error(msg) -> panic as msg -//// } -//// ``` +//// The parser supports common Gherkin syntax: +//// Feature / Scenario / Scenario Outline, Background, tags (`@tag`), steps, +//// DocStrings (`"""`), DataTables (`| ... |`), Examples tables, and comments. 
import dream_test/file import dream_test/gherkin/types.{ @@ -60,7 +35,13 @@ import gleam/string /// - `Ok(Feature)`: Successfully parsed feature /// - `Error(String)`: Parse error with description /// -pub fn parse_file(path: String) -> Result(Feature, String) { +/// ## Example +/// +/// ```gleam +/// let assert Ok(feature) = parser.parse_file("test/cart.feature") +/// ``` +/// +pub fn parse_file(path path: String) -> Result(Feature, String) { case file.read(path) { Ok(content) -> parse_string(content) Error(error) -> @@ -81,7 +62,25 @@ pub fn parse_file(path: String) -> Result(Feature, String) { /// - `Ok(Feature)`: Successfully parsed feature /// - `Error(String)`: Parse error with description /// -pub fn parse_string(content: String) -> Result(Feature, String) { +/// ## Example +/// +/// ```gleam +/// let content = +/// "@smoke\n" +/// <> "Feature: Demo\n" +/// <> "\n" +/// <> " Scenario: One\n" +/// <> " Given a thing\n" +/// +/// use feature <- result.try(parser.parse_string(content)) +/// +/// feature.name +/// |> should +/// |> be_equal("Demo") +/// |> or_fail_with("expected feature name Demo") +/// ``` +/// +pub fn parse_string(content content: String) -> Result(Feature, String) { let lines = string.split(content, "\n") let state = initial_state() parse_lines(lines, state) diff --git a/src/dream_test/gherkin/step_trie.gleam b/src/dream_test/gherkin/step_trie.gleam index 15d46ea..273be78 100644 --- a/src/dream_test/gherkin/step_trie.gleam +++ b/src/dream_test/gherkin/step_trie.gleam @@ -1,67 +1,32 @@ -//// Step Trie - Fast step definition lookup using Cucumber Expressions. +//// Fast step-definition lookup using Cucumber-Expression-style placeholders. //// -//// Provides O(word count) step matching instead of O(step definitions) linear search. -//// This is the core data structure for efficient Gherkin step matching. +//// This is the data structure behind `dream_test/gherkin/steps`. 
It matches step +//// text in time proportional to the **number of tokens in the step text**, not +//// the number of registered step definitions. //// -//// ## Performance Characteristics +//// ## Placeholder syntax //// -//// - **Insert**: O(words in pattern) - typically 5-10 words -//// - **Lookup**: O(words in step text) - independent of total step definitions -//// - **Memory**: O(total words across all patterns) +//// - `{int}`: integers like `42`, `-5` +//// - `{float}`: decimals like `3.14`, `-0.5` +//// - `{string}`: quoted strings like `"hello world"` +//// - `{word}` / `{}`: a single token //// -//// ## Placeholder Syntax -//// -//// Patterns use Cucumber Expression placeholders: -//// -//// | Placeholder | Matches | Captures | -//// |-------------|---------|----------| -//// | `{int}` | Integer like `42`, `-5` | `CapturedInt(Int)` | -//// | `{float}` | Decimal like `3.14`, `0.5` | `CapturedFloat(Float)` | -//// | `{string}` | Quoted text like `"hello"` | `CapturedString(String)` | -//// | `{word}` | Single word like `apple` | `CapturedWord(String)` | -//// | `{}` | Any single token | `CapturedWord(String)` | -//// -//// ## Prefix and Suffix Support -//// -//// Placeholders can have literal prefixes and suffixes: -//// -//// - `${float}` matches `$3.99` and captures `3.99` -//// - `{int}%` matches `50%` and captures `50` -//// - `${float}USD` matches `$19.99USD` and captures `19.99` -//// -//// This works by splitting patterns and text at placeholder/numeric boundaries, -//// so `${float}` becomes `["$", "{float}"]` in the trie, and `$3.99` becomes -//// `["$", "3.99"]` during matching. -//// -//// ## Matching Priority -//// -//// When multiple patterns could match, the trie uses this priority order: -//// -//// 1. **Literal words** - exact string match (highest priority) -//// 2. **{string}** - quoted string capture -//// 3. **{int}** - integer capture -//// 4. **{float}** - decimal capture -//// 5. **{word}** - single word capture -//// 6. 
**{}** - any word capture (lowest priority) -//// -//// This ensures the most specific step definition wins. +//// Prefix/suffix text can be attached to placeholders. For example, +//// `${float}USD` matches `$19.99USD` and captures `19.99` as a float. //// //// ## Example //// //// ```gleam -//// let trie = new() -//// |> insert("Given", "I have {int} items", count_handler) -//// |> insert("Given", "I have an empty cart", empty_handler) -//// |> insert("Then", "the total is ${float}", total_handler) -//// -//// // Matches empty_handler (literal "an" beats {int}) -//// lookup(trie, "Given", "I have an empty cart") -//// -//// // Matches count_handler, captures [CapturedInt(42)] -//// lookup(trie, "Given", "I have 42 items") -//// -//// // Matches total_handler, captures [CapturedFloat(19.99)] -//// lookup(trie, "Then", "the total is $19.99") +//// step_trie.lookup(trie, "Then", "the total is $19.99USD") +//// |> should +//// |> be_equal( +//// Some( +//// step_trie.StepMatch(handler: "total_usd", captures: [ +//// step_trie.CapturedFloat(19.99), +//// ]), +//// ), +//// ) +//// |> or_fail_with("expected float capture for $19.99USD") //// ``` import gleam/dict.{type Dict} @@ -209,8 +174,13 @@ pub type StepMatch(handler) { /// ## Example /// /// ```gleam -/// let trie = new() -/// |> insert("Given", "I have {int} items", handler) +/// let trie = +/// step_trie.new() +/// |> step_trie.insert( +/// keyword: "Given", +/// pattern: "I have an empty cart", +/// handler: "empty", +/// ) /// ``` /// pub fn new() -> StepTrie(handler) { @@ -232,16 +202,18 @@ pub fn new() -> StepTrie(handler) { /// ## Example /// /// ```gleam -/// let trie = new() -/// |> insert("Given", "I have {int} items", have_items) -/// |> insert("When", "I add {int} more", add_items) +/// |> step_trie.insert( +/// keyword: "Given", +/// pattern: "I have {int} items", +/// handler: "count", +/// ) /// ``` /// pub fn insert( - trie: StepTrie(handler), - keyword: String, - pattern: String, - handler: 
handler, + trie trie: StepTrie(handler), + keyword keyword: String, + pattern pattern: String, + handler handler: handler, ) -> StepTrie(handler) { let segments = parse_step_pattern(pattern) let updated_root = insert_into_node(trie.root, keyword, segments, handler) @@ -385,22 +357,25 @@ fn get_or_create_literal_child( /// This enables patterns like `"the price is ${float}"` to match text like /// `"the price is $19.99"` and capture `19.99` as the float value. /// -/// ## Examples +/// ## Example /// /// ```gleam -/// parse_step_pattern("I have {int} items") -/// // [LiteralWord("I"), LiteralWord("have"), IntParam, LiteralWord("items")] -/// -/// parse_step_pattern("the total is ${float}") -/// // [LiteralWord("the"), LiteralWord("total"), LiteralWord("is"), -/// // LiteralWord("$"), FloatParam] -/// -/// parse_step_pattern("I apply a {int}% discount") -/// // [LiteralWord("I"), LiteralWord("apply"), LiteralWord("a"), -/// // IntParam, LiteralWord("%"), LiteralWord("discount")] +/// step_trie.parse_step_pattern("the total is ${float}USD") +/// |> should +/// |> be_equal([ +/// step_trie.LiteralWord("the"), +/// step_trie.LiteralWord("total"), +/// step_trie.LiteralWord("is"), +/// step_trie.LiteralWord("$"), +/// step_trie.FloatParam, +/// step_trie.LiteralWord("USD"), +/// ]) +/// |> or_fail_with( +/// "expected ${float}USD to split into literal + FloatParam segments", +/// ) /// ``` /// -pub fn parse_step_pattern(pattern: String) -> List(StepSegment) { +pub fn parse_step_pattern(pattern pattern: String) -> List(StepSegment) { pattern |> string.split(" ") |> list.filter(is_non_empty) @@ -438,27 +413,54 @@ fn split_on_first_placeholder( ) -> List(String) { case placeholders { [] -> [word] - [placeholder, ..rest] -> { - case string.split_once(word, placeholder) { - Ok(#(before, after)) -> { - let parts = [] - let parts = case before { - "" -> parts - _ -> list.append(parts, [before]) - } - let parts = list.append(parts, [placeholder]) - let parts = case after { - "" 
-> parts - _ -> list.append(parts, split_word_around_placeholder(after)) - } - parts - } - Error(_) -> split_on_first_placeholder(word, rest) + [placeholder, ..rest] -> + case try_split_on_placeholder(word, placeholder) { + Some(parts) -> parts + None -> split_on_first_placeholder(word, rest) } - } } } +fn try_split_on_placeholder( + word: String, + placeholder: String, +) -> Option(List(String)) { + case string.split_once(word, placeholder) { + Ok(#(before, after)) -> + Some(append_placeholder_parts(before, placeholder, after)) + Error(_) -> None + } +} + +fn append_placeholder_parts( + before: String, + placeholder: String, + after: String, +) -> List(String) { + [] + |> append_if_non_empty(before) + |> list.append([placeholder]) + |> append_list(split_after(after)) +} + +fn append_if_non_empty(parts: List(String), value: String) -> List(String) { + case value { + "" -> parts + _ -> list.append(parts, [value]) + } +} + +fn split_after(after: String) -> List(String) { + case after { + "" -> [] + _ -> split_word_around_placeholder(after) + } +} + +fn append_list(parts: List(String), extra: List(String)) -> List(String) { + list.append(parts, extra) +} + /// Split a token on boundaries between numeric and non-numeric characters. 
/// e.g., "$3.00" -> ["$", "3.00"] /// e.g., "10%" -> ["10", "%"] @@ -473,17 +475,19 @@ fn split_numeric_boundaries(token: String) -> List(String) { fn do_split_numeric(token: String) -> List(String) { case regexp.from_string("(-?[0-9]+\\.?[0-9]*)") { - Ok(re) -> { - case regexp.split(re, token) { - // No match, return as-is - [only] -> [only] - parts -> parts |> list.filter(is_non_empty) - } - } + Ok(re) -> split_numeric_with_regex(token, re) Error(_) -> [token] } } +fn split_numeric_with_regex(token: String, re: regexp.Regexp) -> List(String) { + case regexp.split(re, token) { + // No match, return as-is + [only] -> [only] + parts -> parts |> list.filter(is_non_empty) + } +} + // ============================================================================ // Trie Lookup // ============================================================================ @@ -509,19 +513,22 @@ fn do_split_numeric(token: String) -> List(String) { /// ## Example /// /// ```gleam -/// let result = lookup(trie, "Given", "I have 42 items") -/// case result { -/// Some(StepMatch(handler, captures)) -> { -/// // captures = [CapturedInt(42)] -/// } -/// None -> // No matching step definition -/// } +/// step_trie.lookup(trie, "Then", "the total is $19.99USD") +/// |> should +/// |> be_equal( +/// Some( +/// step_trie.StepMatch(handler: "total_usd", captures: [ +/// step_trie.CapturedFloat(19.99), +/// ]), +/// ), +/// ) +/// |> or_fail_with("expected float capture for $19.99USD") /// ``` /// pub fn lookup( - trie: StepTrie(handler), - keyword: String, - text: String, + trie trie: StepTrie(handler), + keyword keyword: String, + text text: String, ) -> Option(StepMatch(handler)) { let words = tokenize_step_text(text) lookup_in_node(trie.root, keyword, words, []) @@ -544,31 +551,27 @@ pub fn lookup( /// pattern like `${float}` is parsed, it becomes `["$", "{float}"]`. For /// matching to work, the text `$19.99` must also become `["$", "19.99"]`. 
/// -/// ## Examples +/// ## Example /// /// ```gleam -/// // Basic tokenization -/// tokenize_step_text("I have 5 items") -/// // ["I", "have", "5", "items"] -/// -/// // Quoted strings preserved -/// tokenize_step_text("I add \"Red Widget\" to cart") -/// // ["I", "add", "\"Red Widget\"", "to", "cart"] -/// -/// // Currency prefix split from number -/// tokenize_step_text("the total is $19.99") -/// // ["the", "total", "is", "$", "19.99"] -/// -/// // Percentage suffix split from number -/// tokenize_step_text("I apply a 15% discount") -/// // ["I", "apply", "a", "15", "%", "discount"] -/// -/// // Multiple numeric boundaries -/// tokenize_step_text("price is $99.99USD") -/// // ["price", "is", "$", "99.99", "USD"] +/// step_trie.tokenize_step_text("I add \"Red Widget\" and pay $19.99USD") +/// |> should +/// |> be_equal([ +/// "I", +/// "add", +/// "\"Red Widget\"", +/// "and", +/// "pay", +/// "$", +/// "19.99", +/// "USD", +/// ]) +/// |> or_fail_with( +/// "expected tokenization to preserve quotes and split $19.99USD", +/// ) /// ``` /// -pub fn tokenize_step_text(text: String) -> List(String) { +pub fn tokenize_step_text(text text: String) -> List(String) { text |> tokenize_preserving_quotes([], "", False) |> list.flat_map(split_numeric_boundaries) diff --git a/src/dream_test/gherkin/steps.gleam b/src/dream_test/gherkin/steps.gleam index 2f71ce5..cd1404e 100644 --- a/src/dream_test/gherkin/steps.gleam +++ b/src/dream_test/gherkin/steps.gleam @@ -1,74 +1,33 @@ //// Step definition registry for Gherkin scenarios. //// -//// This module provides the user-facing API for defining step definitions -//// that match Gherkin steps (Given/When/Then) to Gleam handler functions. +//// Use this module to: //// -//// ## Quick Start +//// - Build a `StepRegistry` by registering patterns and handlers. +//// - Match step text against registered patterns (including typed placeholders). +//// - Extract typed captures (`{int}`, `{float}`, `{string}`, `{word}`, `{}`). 
//// -//// ```gleam -//// import dream_test/gherkin/steps.{ -//// type StepContext, given, new_registry, then_, when_, get_int, get_float, -//// } -//// -//// pub fn cart_steps() -> StepRegistry { -//// new_registry() -//// |> given("I have {int} items in my cart", have_items) -//// |> when_("I add {int} items of {word}", add_items) -//// |> then_("the total should be ${float}", check_total) -//// } +//// ## Example //// -//// fn have_items(context: StepContext) -> AssertionResult { -//// case get_int(context.captures, 0) { -//// Ok(count) -> { -//// world.put(context.world, "count", count) -//// AssertionOk -//// } -//// Error(msg) -> fail_with(msg) -//// } -//// } +//// ```gleam +//// let steps = +//// new() +//// |> step("I have {int} items in my cart", step_have_items) +//// |> step("I add {int} more items", step_add_items) +//// |> step("I should have {int} items total", step_should_have) //// ``` //// -//// ## Pattern Syntax -//// -//// Step patterns use Cucumber Expression syntax with typed placeholders: -//// -//// | Placeholder | Matches | Example | -//// |-------------|-----------------------|----------------| -//// | `{int}` | Integers | `42`, `-5` | -//// | `{float}` | Decimals | `3.14`, `-0.5` | -//// | `{string}` | Quoted strings | `"hello"` | -//// | `{word}` | Single unquoted word | `apple` | -//// | `{}` | Any single token | (anonymous) | -//// -//// ## Prefix and Suffix Support +//// ## Placeholder types //// -//// Placeholders can have literal prefixes and suffixes attached: +//// Step patterns use typed placeholders: //// -//// | Pattern | Matches | Captures | -//// |---------|---------|----------| -//// | `${float}` | `$19.99` | `19.99` as Float | -//// | `{int}%` | `50%` | `50` as Int | -//// | `${float}USD` | `$99.99USD` | `99.99` as Float | +//// - `{int}`: integers (e.g. `42`, `-5`) +//// - `{float}`: decimals (e.g. `3.14`, `-0.5`) +//// - `{string}`: quoted strings (e.g. `"hello world"`) +//// - `{word}`: a single unquoted word (e.g. 
`alice`) +//// - `{}`: any single token //// -//// This is useful for currency, percentages, and other formatted values. -//// -//// ## Capture Extraction -//// -//// Use the typed extraction helpers to get captured values: -//// -//// ```gleam -//// fn check_total(context: StepContext) -> AssertionResult { -//// // Pattern was "the total should be ${float}" -//// // Step text was "the total should be $19.99" -//// case get_float(context.captures, 0) { -//// Ok(amount) -> { -//// // amount is 19.99 (the $ prefix was matched but not captured) -//// AssertionOk -//// } -//// Error(msg) -> fail_with(msg) -//// } -//// } -//// ``` +//// Placeholders can include literal prefixes/suffixes. For example, `${float}` +//// matches `$19.99` but captures `19.99` as a `Float`. import dream_test/gherkin/step_trie.{ type CapturedValue, type StepMatch, type StepTrie, CapturedFloat, CapturedInt, @@ -108,14 +67,29 @@ pub type StepContext { /// Type alias for step handler functions. /// /// All step handlers have the same signature: they receive a StepContext -/// and return an AssertionResult. +/// and return `Result(AssertionResult, String)`, just like unit test bodies. +/// +/// This means you can use the exact same assertion style inside steps: +/// +/// ```gleam +/// fn step_should_have(context: StepContext) { +/// let expected = get_int(context.captures, 0) |> result.unwrap(0) +/// get_or(context.world, "cart", 0) +/// |> should +/// |> be_equal(expected) +/// |> or_fail_with("Cart count mismatch") +/// } +/// ``` /// pub type StepHandler = - fn(StepContext) -> dream_types.AssertionResult + fn(StepContext) -> Result(dream_types.AssertionResult, String) /// Step registry backed by radix trie. /// -/// Stores step definitions and provides O(words) lookup. +/// Stores step definitions and provides fast lookup. +/// +/// “Fast” here means lookup cost grows with the **length of the step text**, +/// not with how many step definitions you’ve registered. 
/// pub opaque type StepRegistry { StepRegistry(trie: StepTrie(StepHandler)) @@ -132,13 +106,16 @@ pub opaque type StepRegistry { /// ## Example /// /// ```gleam -/// let steps = new_registry() -/// |> given("I have {int} items", have_items) -/// |> when_("I add {int} more", add_items) -/// |> then_("I should have {int} total", check_total) +/// let steps = +/// new() +/// |> step("I have {int} items", step_int) +/// |> step("the price is ${float}", step_float) +/// |> step("the message is {string}", step_string) +/// |> step("the user is {word}", step_word) +/// |> step("everything works", step_pass) /// ``` /// -pub fn new_registry() -> StepRegistry { +pub fn new() -> StepRegistry { StepRegistry(trie: step_trie.new()) } @@ -152,18 +129,20 @@ pub fn new_registry() -> StepRegistry { /// - `pattern`: Step pattern with placeholders /// - `handler`: Handler function to execute /// +/// ## Returns +/// +/// A new registry containing the added step. +/// /// ## Example /// /// ```gleam -/// new_registry() -/// |> given("I have {int} items in my cart", have_items) -/// |> given("I am logged in as {string}", logged_in_as) +/// let registry = new() |> given("I have {int} items", step_pass) /// ``` /// pub fn given( - registry: StepRegistry, - pattern: String, - handler: StepHandler, + registry registry: StepRegistry, + pattern pattern: String, + handler handler: StepHandler, ) -> StepRegistry { let updated = step_trie.insert(registry.trie, "Given", pattern, handler) StepRegistry(trie: updated) @@ -181,18 +160,20 @@ pub fn given( /// - `pattern`: Step pattern with placeholders /// - `handler`: Handler function to execute /// +/// ## Returns +/// +/// A new registry containing the added step. 
+/// /// ## Example /// /// ```gleam -/// new_registry() -/// |> when_("I add {int} items of {string}", add_items) -/// |> when_("I click the {string} button", click_button) +/// let registry = new() |> when_("I add {int} items", step_pass) /// ``` /// pub fn when_( - registry: StepRegistry, - pattern: String, - handler: StepHandler, + registry registry: StepRegistry, + pattern pattern: String, + handler handler: StepHandler, ) -> StepRegistry { let updated = step_trie.insert(registry.trie, "When", pattern, handler) StepRegistry(trie: updated) @@ -210,18 +191,20 @@ pub fn when_( /// - `pattern`: Step pattern with placeholders /// - `handler`: Handler function to execute /// +/// ## Returns +/// +/// A new registry containing the added step. +/// /// ## Example /// /// ```gleam -/// new_registry() -/// |> then_("my cart should have {int} items", check_count) -/// |> then_("I should see {string}", check_text) +/// let registry = new() |> then_("I should have {int} items", step_pass) /// ``` /// pub fn then_( - registry: StepRegistry, - pattern: String, - handler: StepHandler, + registry registry: StepRegistry, + pattern pattern: String, + handler handler: StepHandler, ) -> StepRegistry { let updated = step_trie.insert(registry.trie, "Then", pattern, handler) StepRegistry(trie: updated) @@ -237,17 +220,24 @@ pub fn then_( /// - `pattern`: Step pattern with placeholders /// - `handler`: Handler function to execute /// +/// ## Returns +/// +/// A new registry containing the added step. 
+/// /// ## Example /// /// ```gleam -/// new_registry() -/// |> step("I wait {int} seconds", wait_seconds) +/// let steps = +/// new() +/// |> step("I have {int} items in my cart", step_have_items) +/// |> step("I add {int} more items", step_add_items) +/// |> step("I should have {int} items total", step_should_have) /// ``` /// pub fn step( - registry: StepRegistry, - pattern: String, - handler: StepHandler, + registry registry: StepRegistry, + pattern pattern: String, + handler handler: StepHandler, ) -> StepRegistry { let updated = step_trie.insert(registry.trie, "*", pattern, handler) StepRegistry(trie: updated) @@ -262,7 +252,26 @@ pub fn step( /// Searches the registry for a handler matching the given keyword and text. /// Returns the handler and captured values on success, or an error message. /// -/// This is O(words in step text), not O(number of step definitions). +/// ## Performance +/// +/// Lookup cost scales with the number of **tokens** in the step text after +/// tokenization (the same tokenization used by the step trie). +/// +/// Concretely: this is the **total number of tokens** in the input step text, +/// not the number of *unique* words. +/// +/// In practice: +/// - Tokens are usually “words” separated by spaces. +/// - Quoted strings are treated as a single token. +/// - Some punctuation/number boundaries are split so patterns like `{int}%` or +/// `${float}USD` can match predictably. +/// +/// Examples: +/// - `"I add 2 items"` → 4 tokens (`["I", "add", "2", "items"]`) +/// - `"the message is \"hello world\""` → 4 tokens (`["the", "message", "is", "\"hello world\""]`) +/// +/// This does **not** scale with the number of registered steps: you can register +/// hundreds of steps and lookup still stays proportional to the tokenized input. 
/// /// ## Parameters /// @@ -275,10 +284,23 @@ pub fn step( /// - `Ok(StepMatch)`: Contains matched handler and captured values /// - `Error(String)`: Error message if no match found /// +/// ## Example +/// +/// ```gleam +/// let registry = new() |> given("I have {int} items", step_pass) +/// +/// use matched <- result.try(find_step(registry, Given, "I have 3 items")) +/// +/// capture_count(matched.captures) +/// |> should +/// |> be_equal(1) +/// |> or_fail_with("expected exactly one capture") +/// ``` +/// pub fn find_step( - registry: StepRegistry, - keyword: StepKeyword, - text: String, + registry registry: StepRegistry, + keyword keyword: StepKeyword, + text text: String, ) -> Result(StepMatch(StepHandler), String) { let keyword_str = keyword_to_string(keyword) case step_trie.lookup(registry.trie, keyword_str, text) { @@ -314,15 +336,17 @@ fn keyword_to_string(keyword: StepKeyword) -> String { /// ## Example /// /// ```gleam -/// // Pattern: "I have {int} items" -/// // Step: "I have 42 items" -/// case get_int(context.captures, 0) { -/// Ok(count) -> // count = 42 -/// Error(msg) -> fail_with(msg) +/// fn step_int(context: StepContext) { +/// let value = get_int(context.captures, 0) |> result.unwrap(0) +/// put(context.world, "int", value) +/// Ok(succeed()) /// } /// ``` /// -pub fn get_int(captures: List(CapturedValue), index: Int) -> Result(Int, String) { +pub fn get_int( + captures captures: List(CapturedValue), + index index: Int, +) -> Result(Int, String) { case list_at(captures, index) { Some(CapturedInt(value)) -> Ok(value) Some(_) -> Error("Capture at index is not an integer") @@ -343,17 +367,16 @@ pub fn get_int(captures: List(CapturedValue), index: Int) -> Result(Int, String) /// ## Example /// /// ```gleam -/// // Pattern: "the price is {float} dollars" -/// // Step: "the price is 19.99 dollars" -/// case get_float(context.captures, 0) { -/// Ok(price) -> // price = 19.99 -/// Error(msg) -> fail_with(msg) +/// fn step_float(context: 
StepContext) { +/// let value = get_float(context.captures, 0) |> result.unwrap(0.0) +/// put(context.world, "float", value) +/// Ok(succeed()) /// } /// ``` /// pub fn get_float( - captures: List(CapturedValue), - index: Int, + captures captures: List(CapturedValue), + index index: Int, ) -> Result(Float, String) { case list_at(captures, index) { Some(CapturedFloat(value)) -> Ok(value) @@ -375,17 +398,16 @@ pub fn get_float( /// ## Example /// /// ```gleam -/// // Pattern: "I add items of {string}" -/// // Step: "I add items of \"Red Widget\"" -/// case get_string(context.captures, 0) { -/// Ok(product) -> // product = "Red Widget" -/// Error(msg) -> fail_with(msg) +/// fn step_string(context: StepContext) { +/// let value = get_string(context.captures, 0) |> result.unwrap("") +/// put(context.world, "string", value) +/// Ok(succeed()) /// } /// ``` /// pub fn get_string( - captures: List(CapturedValue), - index: Int, + captures captures: List(CapturedValue), + index index: Int, ) -> Result(String, String) { case list_at(captures, index) { Some(CapturedString(value)) -> Ok(value) @@ -408,17 +430,16 @@ pub fn get_string( /// ## Example /// /// ```gleam -/// // Pattern: "the user {word} exists" -/// // Step: "the user alice exists" -/// case get_word(context.captures, 0) { -/// Ok(username) -> // username = "alice" -/// Error(msg) -> fail_with(msg) +/// fn step_word(context: StepContext) { +/// let value = get_word(context.captures, 0) |> result.unwrap("") +/// put(context.world, "word", value) +/// Ok(succeed()) /// } /// ``` /// pub fn get_word( - captures: List(CapturedValue), - index: Int, + captures captures: List(CapturedValue), + index index: Int, ) -> Result(String, String) { case list_at(captures, index) { Some(CapturedWord(value)) -> Ok(value) @@ -439,11 +460,13 @@ pub fn get_word( /// ## Example /// /// ```gleam -/// let count = capture_count(context.captures) -/// // For "I add {int} items of {string}", count would be 2 +/// 
capture_count(matched.captures) +/// |> should +/// |> be_equal(1) +/// |> or_fail_with("expected exactly one capture") /// ``` /// -pub fn capture_count(captures: List(CapturedValue)) -> Int { +pub fn capture_count(captures captures: List(CapturedValue)) -> Int { list.length(captures) } diff --git a/src/dream_test/gherkin/types.gleam b/src/dream_test/gherkin/types.gleam index 7868788..c687f00 100644 --- a/src/dream_test/gherkin/types.gleam +++ b/src/dream_test/gherkin/types.gleam @@ -1,46 +1,18 @@ //// Gherkin types for dream_test. //// -//// This module defines the data structures representing parsed Gherkin -//// `.feature` files. These types are used by the parser and converted -//// to dream_test's `TestCase` and `TestSuite` types for execution. +//// These are the data structures produced by the Gherkin parser and consumed +//// by the feature/discovery APIs. Most users won’t construct them directly, +//// but they’re useful when you’re integrating your own parser or tooling. //// -//// ## Type Overview +//// ## Example //// -//// | Type | Purpose | -//// |-------------------|--------------------------------------------------| -//// | `Feature` | A parsed `.feature` file | -//// | `Scenario` | A single scenario or scenario outline | -//// | `Step` | A Given/When/Then step with text | -//// | `StepKeyword` | The keyword type (Given, When, Then, And, But) | -//// | `StepArgument` | Optional DocString or DataTable | -//// | `Background` | Steps to run before each scenario | -//// | `ExamplesTable` | Data table for Scenario Outline expansion | +//// Use this when building BDD suites (e.g. in a snippet `tests()` function). 
//// -//// ## Gherkin Syntax Reference -//// -//// ```gherkin -//// @tag1 @tag2 -//// Feature: Shopping Cart -//// As a customer -//// I want to manage my cart -//// -//// Background: -//// Given I am logged in -//// -//// Scenario: Adding items -//// Given I have an empty cart -//// When I add 3 items of "Widget" -//// Then my cart should have 3 items -//// -//// Scenario Outline: Multiple products -//// Given I have an empty cart -//// When I add items of "" -//// Then my cart should have items -//// -//// Examples: -//// | quantity | product | -//// | 1 | Widget | -//// | 5 | Gadget | +//// ```gleam +//// keyword_to_string(Given) +//// |> should +//// |> be_equal("Given") +//// |> or_fail_with("expected Given") //// ``` import gleam/option.{type Option} @@ -76,23 +48,13 @@ pub type StepKeyword { /// /// Multi-line text enclosed in triple quotes: /// -/// ```gherkin -/// Given a document with content: -/// """json -/// {"name": "example"} -/// """ -/// ``` +/// In Dream Test these values are represented as `DocString(...)`. /// /// ## DataTable /// /// Tabular data with pipe-delimited rows: /// -/// ```gherkin -/// Given the following users: -/// | name | email | -/// | Alice | alice@test.com | -/// | Bob | bob@test.com | -/// ``` +/// In Dream Test these values are represented as `DataTable(...)`. /// pub type StepArgument { /// Multi-line text content. @@ -115,10 +77,11 @@ pub type StepArgument { /// /// ## Example /// -/// ```gherkin -/// Given I have 5 items in my cart -/// When I remove 2 items -/// Then I should have 3 items +/// ```gleam +/// Step(keyword: Given, text: "I have 1 item", argument: None) +/// |> should +/// |> be_equal(Step(keyword: Given, text: "I have 1 item", argument: None)) +/// |> or_fail_with("expected Step to be constructible") /// ``` /// pub type Step { @@ -141,16 +104,17 @@ pub type Step { /// A Background defines common setup steps that are executed before /// every scenario in the feature. It's useful for shared preconditions. 
/// +/// Background steps run before each scenario, as if they were prepended. +/// /// ## Example /// -/// ```gherkin -/// Background: -/// Given I am logged in as "admin" -/// And I am on the dashboard +/// ```gleam +/// empty_background() +/// |> should +/// |> be_equal(Background(steps: [])) +/// |> or_fail_with("expected empty background") /// ``` /// -/// These steps run before each scenario, as if they were prepended. -/// pub type Background { Background(steps: List(Step)) } @@ -166,23 +130,12 @@ pub type Background { /// /// ## Example /// -/// ```gherkin -/// Examples: -/// | quantity | product | -/// | 1 | Widget | -/// | 5 | Gadget | -/// ``` -/// -/// With a scenario outline: -/// -/// ```gherkin -/// When I add items of "" +/// ```gleam +/// ExamplesTable(headers: ["quantity"], rows: [["1"], ["5"]]) +/// |> should +/// |> be_equal(ExamplesTable(headers: ["quantity"], rows: [["1"], ["5"]])) +/// |> or_fail_with("expected ExamplesTable to be constructible") /// ``` -/// -/// This expands to two scenarios: -/// - "When I add 1 items of \"Widget\"" -/// - "When I add 5 items of \"Gadget\"" -/// pub type ExamplesTable { ExamplesTable( /// Column headers (e.g., ["quantity", "product"]) @@ -200,30 +153,18 @@ pub type ExamplesTable { /// /// There are two variants: /// -/// ## Scenario +/// A `Scenario` has concrete step text; a `ScenarioOutline` is parameterized +/// with an examples table and uses `` values in step text. 
/// -/// A single test case with fixed steps: -/// -/// ```gherkin -/// Scenario: Adding items to cart -/// Given I have an empty cart -/// When I add 3 items -/// Then my cart should have 3 items -/// ``` -/// -/// ## ScenarioOutline -/// -/// A parameterized test with examples table: +/// ## Example /// -/// ```gherkin -/// Scenario Outline: Adding products -/// When I add items of "" -/// Then my cart should have items +/// ```gleam +/// let step = Step(keyword: Given, text: "I have 1 item", argument: None) /// -/// Examples: -/// | qty | name | -/// | 1 | Widget | -/// | 5 | Gadget | +/// Scenario(name: "Example scenario", tags: [], steps: [step]) +/// |> should +/// |> be_equal(Scenario(name: "Example scenario", tags: [], steps: [step])) +/// |> or_fail_with("expected Scenario to be constructible") /// ``` /// pub type Scenario { @@ -263,25 +204,28 @@ pub type Scenario { /// /// ## Example /// -/// ```gherkin -/// @shopping -/// Feature: Shopping Cart -/// As a customer -/// I want to manage items in my cart -/// So that I can purchase what I need -/// -/// Background: -/// Given I am logged in -/// -/// Scenario: Empty cart -/// Given I have an empty cart -/// Then my cart count should be 0 -/// -/// @slow -/// Scenario: Adding items -/// Given I have an empty cart -/// When I add 3 items of "Widget" -/// Then my cart should have 3 items +/// ```gleam +/// let step = Step(keyword: Given, text: "I have 1 item", argument: None) +/// let scenario = Scenario(name: "Example scenario", tags: [], steps: [step]) +/// +/// Feature( +/// name: "Example feature", +/// description: None, +/// tags: [], +/// background: None, +/// scenarios: [scenario], +/// ) +/// |> should +/// |> be_equal( +/// Feature( +/// name: "Example feature", +/// description: None, +/// tags: [], +/// background: None, +/// scenarios: [scenario], +/// ), +/// ) +/// |> or_fail_with("expected Feature to be constructible") /// ``` /// pub type Feature { @@ -308,11 +252,13 @@ pub type Feature { 
/// ## Example /// /// ```gleam -/// keyword_to_string(Given) // -> "Given" -/// keyword_to_string(And) // -> "And" +/// keyword_to_string(Given) +/// |> should +/// |> be_equal("Given") +/// |> or_fail_with("expected Given") /// ``` /// -pub fn keyword_to_string(keyword: StepKeyword) -> String { +pub fn keyword_to_string(keyword keyword: StepKeyword) -> String { case keyword { Given -> "Given" When -> "When" @@ -329,11 +275,13 @@ pub fn keyword_to_string(keyword: StepKeyword) -> String { /// ## Example /// /// ```gleam -/// keyword_from_string("Given") // -> Some(Given) -/// keyword_from_string("Hello") // -> None +/// keyword_from_string("Then") +/// |> should +/// |> be_equal(Some(Then)) +/// |> or_fail_with("expected Some(Then)") /// ``` /// -pub fn keyword_from_string(text: String) -> Option(StepKeyword) { +pub fn keyword_from_string(text text: String) -> Option(StepKeyword) { case text { "Given" -> option.Some(Given) "When" -> option.Some(When) @@ -352,14 +300,15 @@ pub fn keyword_from_string(text: String) -> Option(StepKeyword) { /// ## Example /// /// ```gleam -/// resolve_keyword(And, Given) // -> Given -/// resolve_keyword(But, Then) // -> Then -/// resolve_keyword(When, Given) // -> When (non-And/But unchanged) +/// resolve_keyword(And, Given) +/// |> should +/// |> be_equal(Given) +/// |> or_fail_with("expected And after Given to resolve to Given") /// ``` /// pub fn resolve_keyword( - keyword: StepKeyword, - previous: StepKeyword, + keyword keyword: StepKeyword, + previous previous: StepKeyword, ) -> StepKeyword { case keyword { And -> previous @@ -372,6 +321,15 @@ pub fn resolve_keyword( /// /// Useful as a default value or for testing. 
/// +/// ## Example +/// +/// ```gleam +/// empty_examples() +/// |> should +/// |> be_equal(ExamplesTable(headers: [], rows: [])) +/// |> or_fail_with("expected empty examples table") +/// ``` +/// pub fn empty_examples() -> ExamplesTable { ExamplesTable(headers: [], rows: []) } @@ -380,6 +338,15 @@ pub fn empty_examples() -> ExamplesTable { /// /// Useful as a default value or for testing. /// +/// ## Example +/// +/// ```gleam +/// empty_background() +/// |> should +/// |> be_equal(Background(steps: [])) +/// |> or_fail_with("expected empty background") +/// ``` +/// pub fn empty_background() -> Background { Background(steps: []) } diff --git a/src/dream_test/gherkin/world.gleam b/src/dream_test/gherkin/world.gleam index 9cc4fc2..8749c73 100644 --- a/src/dream_test/gherkin/world.gleam +++ b/src/dream_test/gherkin/world.gleam @@ -11,42 +11,64 @@ //// ## Example //// //// ```gleam -//// fn have_items(context: StepContext) -> AssertionResult { -//// case get_int(context.captures, 0) { -//// Ok(count) -> { -//// world.put(context.world, "cart_count", count) -//// AssertionOk -//// } -//// Error(msg) -> fail_with(msg) -//// } +//// import dream_test/gherkin/feature.{feature, given, scenario, then, when} +//// import dream_test/gherkin/steps.{type StepContext, get_float, step} +//// import dream_test/gherkin/world.{get_or, put} +//// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +//// import dream_test/reporters/bdd +//// import dream_test/reporters/progress +//// import dream_test/runner +//// import gleam/io +//// import gleam/result +//// +//// // NOTE: We annotate `StepContext` because record field access needs a known type. 
+//// fn step_have_balance(context: StepContext) { +//// // {float} captures the numeric value (even with $ prefix) +//// let balance = get_float(context.captures, 0) |> result.unwrap(0.0) +//// put(context.world, "balance", balance) +//// Ok(succeed()) //// } //// -//// fn check_total(context: StepContext) -> AssertionResult { -//// case world.get(context.world, "cart_count") { -//// Ok(count) -> { -//// count -//// |> should() -//// |> equal(expected) -//// |> or_fail_with("Cart count mismatch") -//// } -//// Error(_) -> fail_with("Cart not found in world") -//// } +//// fn step_withdraw(context: StepContext) { +//// let current = get_or(context.world, "balance", 0.0) +//// let amount = get_float(context.captures, 0) |> result.unwrap(0.0) +//// put(context.world, "balance", current -. amount) +//// Ok(succeed()) //// } -//// ``` //// -//// ## Type Safety +//// fn step_balance_is(context: StepContext) { +//// let expected = get_float(context.captures, 0) |> result.unwrap(0.0) +//// get_or(context.world, "balance", 0.0) +//// |> should +//// |> be_equal(expected) +//// |> or_fail_with("Balance mismatch") +//// } //// -//// The World stores values dynamically. 
When retrieving values, you're -//// responsible for ensuring type consistency: +//// pub fn register(registry) { +//// registry +//// |> step("I have a balance of ${float}", step_have_balance) +//// |> step("I withdraw ${float}", step_withdraw) +//// |> step("my balance should be ${float}", step_balance_is) +//// } //// -//// ```gleam -//// // Store an Int -//// world.put(world, "count", 42) +//// pub fn tests() { +//// let steps = steps.new() |> register() //// -//// // Retrieve as Int - caller ensures type matches -//// case world.get(world, "count") { -//// Ok(count) -> use_count(count) // count is inferred from usage -//// Error(_) -> handle_missing() +//// feature("Bank Account", steps, [ +//// scenario("Withdrawal", [ +//// given("I have a balance of $100.00"), +//// when("I withdraw $30.00"), +//// then("my balance should be $70.00"), +//// ]), +//// ]) +//// } +//// +//// pub fn main() { +//// runner.new([tests()]) +//// |> runner.progress_reporter(progress.new()) +//// |> runner.results_reporters([bdd.new()]) +//// |> runner.exit_on_failure() +//// |> runner.run() //// } //// ``` @@ -84,10 +106,16 @@ type EtsTable /// ## Example /// /// ```gleam -/// let world = new_world("shopping_cart::adding_items") +/// import dream_test/gherkin/world +/// +/// pub fn main() { +/// // In normal gherkin runs, the runner creates and cleans up the World for you. 
+/// let w = world.new_world("example_scenario") +/// world.cleanup(w) +/// } /// ``` /// -pub fn new_world(scenario_id: String) -> World { +pub fn new_world(scenario_id scenario_id: String) -> World { let table_name = "gherkin_world_" <> scenario_id <> "_" <> unique_id() let table = create_ets_table(table_name) World(id: scenario_id, table: table) @@ -102,7 +130,19 @@ pub fn new_world(scenario_id: String) -> World { /// /// - `world`: The World to clean up /// -pub fn cleanup(world: World) -> Nil { +/// ## Example +/// +/// ```gleam +/// import dream_test/gherkin/world +/// +/// pub fn main() { +/// // In normal gherkin runs, the runner creates and cleans up the World for you. +/// let w = world.new_world("example_scenario") +/// world.cleanup(w) +/// } +/// ``` +/// +pub fn cleanup(world world: World) -> Nil { delete_ets_table(world.table) } @@ -124,18 +164,74 @@ pub fn cleanup(world: World) -> Nil { /// ## Example /// /// ```gleam -/// world.put(world, "user", User(name: "Alice", age: 30)) -/// world.put(world, "count", 42) -/// world.put(world, "items", ["apple", "banana"]) +/// import dream_test/gherkin/feature.{feature, given, scenario, then, when} +/// import dream_test/gherkin/steps.{type StepContext, get_float, step} +/// import dream_test/gherkin/world.{get_or, put} +/// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner +/// import gleam/io +/// import gleam/result +/// +/// // NOTE: We annotate `StepContext` because record field access needs a known type. 
+/// fn step_have_balance(context: StepContext) { +/// // {float} captures the numeric value (even with $ prefix) +/// let balance = get_float(context.captures, 0) |> result.unwrap(0.0) +/// put(context.world, "balance", balance) +/// Ok(succeed()) +/// } +/// +/// fn step_withdraw(context: StepContext) { +/// let current = get_or(context.world, "balance", 0.0) +/// let amount = get_float(context.captures, 0) |> result.unwrap(0.0) +/// put(context.world, "balance", current -. amount) +/// Ok(succeed()) +/// } +/// +/// fn step_balance_is(context: StepContext) { +/// let expected = get_float(context.captures, 0) |> result.unwrap(0.0) +/// get_or(context.world, "balance", 0.0) +/// |> should +/// |> be_equal(expected) +/// |> or_fail_with("Balance mismatch") +/// } +/// +/// pub fn register(registry) { +/// registry +/// |> step("I have a balance of ${float}", step_have_balance) +/// |> step("I withdraw ${float}", step_withdraw) +/// |> step("my balance should be ${float}", step_balance_is) +/// } +/// +/// pub fn tests() { +/// let steps = steps.new() |> register() +/// +/// feature("Bank Account", steps, [ +/// scenario("Withdrawal", [ +/// given("I have a balance of $100.00"), +/// when("I withdraw $30.00"), +/// then("my balance should be $70.00"), +/// ]), +/// ]) +/// } +/// +/// pub fn main() { +/// runner.new([tests()]) +/// |> runner.progress_reporter(progress.new()) +/// |> runner.results_reporters([bdd.new()]) +/// |> runner.exit_on_failure() +/// |> runner.run() +/// } /// ``` /// -pub fn put(world: World, key: String, value: a) -> Nil { +pub fn put(world world: World, key key: String, value value: a) -> Nil { ets_insert(world.table, key, value) } /// Retrieve a value from the World. /// -/// Returns `Ok(value)` if the key exists, `Error(Nil)` if not found. +/// Returns `Ok(value)` if the key exists, `Error(message)` if not found. /// The caller is responsible for ensuring type consistency. 
/// /// ## Parameters @@ -146,21 +242,66 @@ pub fn put(world: World, key: String, value: a) -> Nil { /// ## Returns /// /// - `Ok(value)`: Key exists, returns the stored value -/// - `Error(Nil)`: Key doesn't exist +/// - `Error(String)`: Key doesn't exist (human-readable message) /// /// ## Example /// /// ```gleam -/// case world.get(world, "user") { -/// Ok(user) -> process_user(user) -/// Error(_) -> handle_missing() +/// import dream_test/gherkin/feature.{feature, given, scenario, then} +/// import dream_test/gherkin/steps.{type StepContext, step} +/// import dream_test/gherkin/world.{get, put} +/// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner +/// import gleam/io +/// +/// fn step_store(context: StepContext) { +/// put(context.world, "count", 42) +/// Ok(succeed()) +/// } +/// +/// fn step_count_is_42(context: StepContext) { +/// case get(context.world, "count") { +/// Ok(count) -> +/// count +/// |> should +/// |> be_equal(42) +/// |> or_fail_with("count mismatch") +/// Error(message) -> Error(message) +/// } +/// } +/// +/// pub fn register(registry) { +/// registry +/// |> step("count is stored", step_store) +/// |> step("count should be 42", step_count_is_42) +/// } +/// +/// pub fn tests() { +/// let steps = steps.new() |> register() +/// +/// feature("World: get", steps, [ +/// scenario("Reading a stored value", [ +/// given("count is stored"), +/// then("count should be 42"), +/// ]), +/// ]) +/// } +/// +/// pub fn main() { +/// runner.new([tests()]) +/// |> runner.progress_reporter(progress.new()) +/// |> runner.results_reporters([bdd.new()]) +/// |> runner.exit_on_failure() +/// |> runner.run() /// } /// ``` /// -pub fn get(world: World, key: String) -> Result(a, Nil) { +pub fn get(world world: World, key key: String) -> Result(a, String) { case ets_lookup(world.table, key) { Some(value) -> Ok(value) - None -> 
Error(Nil) + None -> Error("World key not found: " <> key) } } @@ -178,11 +319,68 @@ pub fn get(world: World, key: String) -> Result(a, Nil) { /// ## Example /// /// ```gleam -/// let count = world.get_or(world, "count", 0) -/// let items = world.get_or(world, "items", []) +/// import dream_test/gherkin/feature.{feature, given, scenario, then, when} +/// import dream_test/gherkin/steps.{type StepContext, get_float, step} +/// import dream_test/gherkin/world.{get_or, put} +/// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner +/// import gleam/io +/// import gleam/result +/// +/// // NOTE: We annotate `StepContext` because record field access needs a known type. +/// fn step_have_balance(context: StepContext) { +/// // {float} captures the numeric value (even with $ prefix) +/// let balance = get_float(context.captures, 0) |> result.unwrap(0.0) +/// put(context.world, "balance", balance) +/// Ok(succeed()) +/// } +/// +/// fn step_withdraw(context: StepContext) { +/// let current = get_or(context.world, "balance", 0.0) +/// let amount = get_float(context.captures, 0) |> result.unwrap(0.0) +/// put(context.world, "balance", current -. 
amount) +/// Ok(succeed()) +/// } +/// +/// fn step_balance_is(context: StepContext) { +/// let expected = get_float(context.captures, 0) |> result.unwrap(0.0) +/// get_or(context.world, "balance", 0.0) +/// |> should +/// |> be_equal(expected) +/// |> or_fail_with("Balance mismatch") +/// } +/// +/// pub fn register(registry) { +/// registry +/// |> step("I have a balance of ${float}", step_have_balance) +/// |> step("I withdraw ${float}", step_withdraw) +/// |> step("my balance should be ${float}", step_balance_is) +/// } +/// +/// pub fn tests() { +/// let steps = steps.new() |> register() +/// +/// feature("Bank Account", steps, [ +/// scenario("Withdrawal", [ +/// given("I have a balance of $100.00"), +/// when("I withdraw $30.00"), +/// then("my balance should be $70.00"), +/// ]), +/// ]) +/// } +/// +/// pub fn main() { +/// runner.new([tests()]) +/// |> runner.progress_reporter(progress.new()) +/// |> runner.results_reporters([bdd.new()]) +/// |> runner.exit_on_failure() +/// |> runner.run() +/// } /// ``` /// -pub fn get_or(world: World, key: String, default: a) -> a { +pub fn get_or(world world: World, key key: String, default default: a) -> a { case get(world, key) { Ok(value) -> value Error(_) -> default @@ -201,13 +399,61 @@ pub fn get_or(world: World, key: String, default: a) -> a { /// ## Example /// /// ```gleam -/// case world.has(world, "user") { -/// True -> io.println("User exists") -/// False -> io.println("User not found") +/// import dream_test/gherkin/feature.{feature, given, scenario, then, when} +/// import dream_test/gherkin/steps.{type StepContext, step} +/// import dream_test/gherkin/world.{delete, has, put} +/// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner +/// import gleam/io +/// +/// fn step_store(context: StepContext) { +/// put(context.world, "temp", True) +/// Ok(succeed()) +/// } +/// +/// fn 
step_delete(context: StepContext) { +/// delete(context.world, "temp") +/// Ok(succeed()) +/// } +/// +/// fn step_is_absent(context: StepContext) { +/// has(context.world, "temp") +/// |> should +/// |> be_equal(False) +/// |> or_fail_with("expected temp to be absent") +/// } +/// +/// pub fn register(registry) { +/// registry +/// |> step("temp is stored", step_store) +/// |> step("temp is deleted", step_delete) +/// |> step("temp should be absent", step_is_absent) +/// } +/// +/// pub fn tests() { +/// let steps = steps.new() |> register() +/// +/// feature("World: has + delete", steps, [ +/// scenario("Deleting a key", [ +/// given("temp is stored"), +/// when("temp is deleted"), +/// then("temp should be absent"), +/// ]), +/// ]) +/// } +/// +/// pub fn main() { +/// runner.new([tests()]) +/// |> runner.progress_reporter(progress.new()) +/// |> runner.results_reporters([bdd.new()]) +/// |> runner.exit_on_failure() +/// |> runner.run() /// } /// ``` /// -pub fn has(world: World, key: String) -> Bool { +pub fn has(world world: World, key key: String) -> Bool { case ets_lookup(world.table, key) { Some(_) -> True None -> False @@ -226,10 +472,61 @@ pub fn has(world: World, key: String) -> Bool { /// ## Example /// /// ```gleam -/// world.delete(world, "temporary_data") +/// import dream_test/gherkin/feature.{feature, given, scenario, then, when} +/// import dream_test/gherkin/steps.{type StepContext, step} +/// import dream_test/gherkin/world.{delete, has, put} +/// import dream_test/matchers.{be_equal, or_fail_with, should, succeed} +/// import dream_test/reporters/bdd +/// import dream_test/reporters/progress +/// import dream_test/runner +/// import gleam/io +/// +/// fn step_store(context: StepContext) { +/// put(context.world, "temp", True) +/// Ok(succeed()) +/// } +/// +/// fn step_delete(context: StepContext) { +/// delete(context.world, "temp") +/// Ok(succeed()) +/// } +/// +/// fn step_is_absent(context: StepContext) { +/// has(context.world, "temp") 
+/// |> should +/// |> be_equal(False) +/// |> or_fail_with("expected temp to be absent") +/// } +/// +/// pub fn register(registry) { +/// registry +/// |> step("temp is stored", step_store) +/// |> step("temp is deleted", step_delete) +/// |> step("temp should be absent", step_is_absent) +/// } +/// +/// pub fn tests() { +/// let steps = steps.new() |> register() +/// +/// feature("World: has + delete", steps, [ +/// scenario("Deleting a key", [ +/// given("temp is stored"), +/// when("temp is deleted"), +/// then("temp should be absent"), +/// ]), +/// ]) +/// } +/// +/// pub fn main() { +/// runner.new([tests()]) +/// |> runner.progress_reporter(progress.new()) +/// |> runner.results_reporters([bdd.new()]) +/// |> runner.exit_on_failure() +/// |> runner.run() +/// } /// ``` /// -pub fn delete(world: World, key: String) -> Nil { +pub fn delete(world world: World, key key: String) -> Nil { ets_delete(world.table, key) } @@ -245,7 +542,20 @@ pub fn delete(world: World, key: String) -> Nil { /// /// The scenario ID string. /// -pub fn scenario_id(world: World) -> String { +/// ## Example +/// +/// ```gleam +/// import dream_test/gherkin/world +/// +/// pub fn main() { +/// let w = world.new_world("example_scenario") +/// let id = world.scenario_id(w) +/// world.cleanup(w) +/// id +/// } +/// ``` +/// +pub fn scenario_id(world world: World) -> String { world.id } diff --git a/src/dream_test/matchers.gleam b/src/dream_test/matchers.gleam new file mode 100644 index 0000000..d7f13e7 --- /dev/null +++ b/src/dream_test/matchers.gleam @@ -0,0 +1,880 @@ +//// Matcher API for Dream Test. +//// +//// Matchers are small functions you pipe values through to produce the value a +//// Dream Test `it(...)` body returns: `Result(AssertionResult, String)`. 
+//// +//// You’ll typically: +//// +//// - Start with `should` (wrap a value for matching) +//// - Apply one or more matchers (like `be_equal`, `be_some`, `contain_string`) +//// - Finish with `or_fail_with("...")` to produce the final test result +//// +//// ## Available Matchers +//// +//// | Category | Matchers | +//// |----------------|-------------------------------------------------------------| +//// | **Equality** | `be_equal`, `not_equal` | +//// | **Boolean** | `be_true`, `be_false` | +//// | **Option** | `be_some`, `be_none` | +//// | **Result** | `be_ok`, `be_error` | +//// | **Collections**| `contain`, `not_contain`, `have_length`, `be_empty` | +//// | **Comparison** | `be_greater_than`, `be_less_than`, `be_at_least`, `be_at_most`, `be_between`, `be_in_range`, `be_greater_than_float`, `be_less_than_float` | +//// | **String** | `start_with`, `end_with`, `contain_string`, `match_regex` | +//// | **Snapshot** | `match_snapshot`, `match_snapshot_inspect` | +//// +//// ## Chaining Matchers +//// +//// Some matchers “unwrap” values: +//// - `be_some()` turns `Option(a)` into `a` +//// - `be_ok()` turns `Result(a, e)` into `a` +//// +//// That’s why you can chain checks after them. +//// +//// ## Explicit failures +//// +//// Sometimes you need to explicitly return “pass” or “fail” from a branch of a +//// `case` expression. Use `succeed()` / `fail_with("...")` for that. +//// +//// ## Imports +//// +//// You can import individual matchers, or import the whole module and qualify +//// with `matchers.`. The examples in these docs assume you imported the matcher +//// functions you’re using. 
+ +import dream_test/matchers/boolean +import dream_test/matchers/collection +import dream_test/matchers/comparison +import dream_test/matchers/equality +import dream_test/matchers/option +import dream_test/matchers/result +import dream_test/matchers/snapshot +import dream_test/matchers/string +import dream_test/types.{ + type AssertionResult, type MatchResult, AssertionFailed, AssertionFailure, + AssertionOk, MatchFailed, MatchOk, +} +import gleam/option as gleam_option + +/// Start a matcher chain. +/// +/// This wraps a value so it can be piped through matchers. +/// Every matcher chain starts with `should`. +/// +/// ## Example +/// +/// ```gleam +/// True +/// |> should +/// |> be_true() +/// |> or_fail_with("expected True") +/// ``` +/// +/// ## Parameters +/// +/// - `value`: any value you want to make assertions about +/// +/// ## Returns +/// +/// A `MatchResult(a)` containing `value`. Subsequent matchers will either: +/// - preserve this value (for “checking” matchers), or +/// - transform it (for “unwrapping” matchers like `be_some` / `be_ok`). +pub fn should(value value: a) -> MatchResult(a) { + MatchOk(value) +} + +// ============================================================================= +// Equality Matchers +// ============================================================================= + +/// Assert that a value equals the expected value. +/// +/// Uses Gleam's structural equality (`==`). +/// +/// ## Example +/// +/// ```gleam +/// 2 + 3 +/// |> should +/// |> be_equal(5) +/// |> or_fail_with("2 + 3 should equal 5") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(a)` produced by `should` (or a previous matcher) +/// - `expected`: the value you expect the actual value to equal +/// +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On success, the original value is preserved for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. 
+pub const be_equal = equality.be_equal + +/// Assert that a value does not equal the unexpected value. +/// +/// ## Example +/// +/// ```gleam +/// 10 + 3 +/// |> should +/// |> not_equal(3) +/// |> or_fail_with("10 + 3 should not equal 3") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(a)` produced by `should` (or a previous matcher) +/// - `unexpected`: the value you expect the actual value to *not* equal +/// +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On success, the original value is preserved for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. +pub const not_equal = equality.not_equal + +// ============================================================================= +// Boolean Matchers +// ============================================================================= + +/// Assert that a value is `True`. +/// +/// ## Example +/// +/// ```gleam +/// True +/// |> should +/// |> be_true() +/// |> or_fail_with("expected True") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Bool)` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Bool)` preserving the boolean for further chaining. +pub const be_true = boolean.be_true + +/// Assert that a value is `False`. +/// +/// ## Example +/// +/// ```gleam +/// False +/// |> should +/// |> be_false() +/// |> or_fail_with("expected False") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Bool)` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Bool)` preserving the boolean for further chaining. +pub const be_false = boolean.be_false + +// ============================================================================= +// Option Matchers +// ============================================================================= + +/// Assert that an `Option` is `Some` and extract its value. 
+/// +/// If the assertion passes, the inner value is passed to subsequent matchers. +/// This enables chaining like `be_some() |> be_equal(42)`. +/// +/// ## Example +/// +/// ```gleam +/// Some(42) +/// |> should +/// |> be_some() +/// |> be_equal(42) +/// |> or_fail_with("expected Some(42)") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Option(a))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On `Some(value)`, the chain continues with the unwrapped `value`. +/// - On `None`, the chain becomes failed and later matchers are skipped. +pub const be_some = option.be_some + +/// Assert that an `Option` is `None`. +/// +/// ## Example +/// +/// ```gleam +/// None +/// |> should +/// |> be_none() +/// |> or_fail_with("expected None") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Option(a))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Nil)` that continues the chain with `Nil` on success. +pub const be_none = option.be_none + +// ============================================================================= +// Result Matchers +// ============================================================================= + +/// Assert that a `Result` is `Ok` and extract its value. +/// +/// If the assertion passes, the `Ok` value is passed to subsequent matchers. +/// This enables chaining like `be_ok() |> be_equal(42)`. +/// +/// ## Example +/// +/// ```gleam +/// Ok("hello") +/// |> should +/// |> be_ok() +/// |> be_equal("hello") +/// |> or_fail_with("expected Ok(\"hello\")") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Result(a, e))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(a)` containing the unwrapped `Ok` value on success. +pub const be_ok = result.be_ok + +/// Assert that a `Result` is `Error` and extract the error value. 
+/// +/// If the assertion passes, the error value is passed to subsequent matchers. +/// +/// ## Example +/// +/// ```gleam +/// Error("nope") +/// |> should +/// |> be_error() +/// |> be_equal("nope") +/// |> or_fail_with("expected Error(\"nope\")") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Result(a, e))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(e)` containing the unwrapped error value on success. +pub const be_error = result.be_error + +// ============================================================================= +// Collection Matchers +// ============================================================================= + +/// Assert that a list contains a specific item. +/// +/// ## Example +/// +/// ```gleam +/// [1, 2, 3] +/// |> should +/// |> contain(2) +/// |> or_fail_with("expected list to contain 2") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `expected_item`: the item that must be present in the list +/// +/// ## Returns +/// +/// A `MatchResult(List(a))` preserving the list for further chaining. +pub const contain = collection.contain + +/// Assert that a list does not contain a specific item. +/// +/// ## Example +/// +/// ```gleam +/// ["a", "b", "c"] +/// |> should +/// |> not_contain("d") +/// |> or_fail_with("expected list to not contain \"d\"") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `unexpected_item`: the item that must *not* be present in the list +/// +/// ## Returns +/// +/// A `MatchResult(List(a))` preserving the list for further chaining. +pub const not_contain = collection.not_contain + +/// Assert that a list has a specific length. 
+/// +/// ## Example +/// +/// ```gleam +/// [1, 2, 3] +/// |> should +/// |> have_length(3) +/// |> or_fail_with("expected list length 3") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `expected_length`: the exact length the list must have +/// +/// ## Returns +/// +/// A `MatchResult(List(a))` preserving the list for further chaining. +pub const have_length = collection.have_length + +/// Assert that a list is empty. +/// +/// ## Example +/// +/// ```gleam +/// [] +/// |> should +/// |> be_empty() +/// |> or_fail_with("expected empty list") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(List(a))` preserving the list for further chaining. +pub const be_empty = collection.be_empty + +// ============================================================================= +// Comparison Matchers (Int) +// ============================================================================= + +/// Assert that an integer is greater than a threshold. +/// +/// ## Example +/// +/// ```gleam +/// 10 +/// |> should +/// |> be_greater_than(0) +/// |> or_fail_with("expected 10 to be greater than 0") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual integer must be greater than +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_greater_than = comparison.be_greater_than + +/// Assert that an integer is less than a threshold. 
+/// +/// ## Example +/// +/// ```gleam +/// 10 +/// |> should +/// |> be_less_than(100) +/// |> or_fail_with("expected 10 to be less than 100") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual integer must be less than +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_less_than = comparison.be_less_than + +/// Assert that an integer is at least a minimum value (>=). +/// +/// ## Example +/// +/// ```gleam +/// 10 +/// |> should +/// |> be_at_least(10) +/// |> or_fail_with("expected 10 to be at least 10") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `minimum`: the minimum allowed value (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_at_least = comparison.be_at_least + +/// Assert that an integer is at most a maximum value (<=). +/// +/// ## Example +/// +/// ```gleam +/// 10 +/// |> should +/// |> be_at_most(10) +/// |> or_fail_with("expected 10 to be at most 10") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `maximum`: the maximum allowed value (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_at_most = comparison.be_at_most + +/// Assert that an integer is between two values (exclusive). +/// +/// The value must be strictly greater than `min` and strictly less than `max`. 
+/// +/// ## Example +/// +/// ```gleam +/// 5 +/// |> should +/// |> be_between(1, 10) +/// |> or_fail_with("expected 5 to be between 1 and 10") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `min`: lower bound (exclusive) +/// - `max`: upper bound (exclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_between = comparison.be_between + +/// Assert that an integer is within a range (inclusive). +/// +/// The value must be >= `min` and <= `max`. +/// +/// ## Example +/// +/// ```gleam +/// 10 +/// |> should +/// |> be_in_range(0, 100) +/// |> or_fail_with("expected 10 to be in range 0..100") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `min`: lower bound (inclusive) +/// - `max`: upper bound (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +pub const be_in_range = comparison.be_in_range + +/// Assert that a float is greater than a threshold. +/// +/// ## Example +/// +/// ```gleam +/// 0.5 +/// |> should +/// |> be_greater_than_float(0.0) +/// |> or_fail_with("expected 0.5 to be greater than 0.0") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Float)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual float must be greater than +/// +/// ## Returns +/// +/// A `MatchResult(Float)` preserving the float for further chaining. +pub const be_greater_than_float = comparison.be_greater_than_float + +/// Assert that a float is less than a threshold. 
+/// +/// ## Example +/// +/// ```gleam +/// 0.5 +/// |> should +/// |> be_less_than_float(1.0) +/// |> or_fail_with("expected 0.5 to be less than 1.0") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Float)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual float must be less than +/// +/// ## Returns +/// +/// A `MatchResult(Float)` preserving the float for further chaining. +pub const be_less_than_float = comparison.be_less_than_float + +// ============================================================================= +// String Matchers +// ============================================================================= + +/// Assert that a string starts with a prefix. +/// +/// ## Example +/// +/// ```gleam +/// "hello world" +/// |> should +/// |> start_with("hello") +/// |> or_fail_with("expected string to start with \"hello\"") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `prefix`: required starting substring +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +pub const start_with = string.start_with + +/// Assert that a string ends with a suffix. +/// +/// ## Example +/// +/// ```gleam +/// "hello.gleam" +/// |> should +/// |> end_with(".gleam") +/// |> or_fail_with("expected .gleam suffix") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `suffix`: required ending substring +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +pub const end_with = string.end_with + +/// Assert that a string contains a substring. 
+/// +/// ## Example +/// +/// ```gleam +/// "hello world" +/// |> should +/// |> contain_string("world") +/// |> or_fail_with("expected substring match") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `substring`: required substring that must be present +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +pub const contain_string = string.contain_string + +/// Assert that a string matches a regular expression. +/// +/// The pattern is compiled using `gleam/regexp.from_string`, and the assertion +/// passes if it matches **anywhere** in the string. Use `^...$` to require a +/// full match. +/// +/// If the pattern is invalid, this matcher fails (with an error message). +/// +/// ## Example +/// +/// ```gleam +/// "user-123" +/// |> should +/// |> match_regex("^user-\\d+$") +/// |> or_fail_with("expected an id like user-123") +/// ``` +pub const match_regex = string.match_regex + +// ============================================================================= +// Snapshot Matchers +// ============================================================================= + +/// Assert that a string matches the content of a snapshot file. +/// +/// - If snapshot **doesn't exist**: creates it and passes +/// - If snapshot **exists and matches**: passes +/// - If snapshot **exists but doesn't match**: fails +/// +/// **To update a snapshot:** delete the file and re-run the test. 
+/// +/// ## Example +/// +/// ```gleam +/// let path = "./test/tmp/match_snapshot_example.snap" +/// "hello" +/// |> should +/// |> match_snapshot(path) +/// |> or_fail_with("expected snapshot match") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `snapshot_path`: file path used to store/compare the snapshot +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +pub const match_snapshot = snapshot.match_snapshot + +/// Assert that any value matches a snapshot (using string.inspect). +/// +/// Serializes the value using `string.inspect` and compares against +/// the stored snapshot. Useful for testing complex data structures. +/// +/// ## Example +/// +/// ```gleam +/// let path = "./test/tmp/match_snapshot_inspect_example.snap" +/// Some(1) +/// |> should +/// |> match_snapshot_inspect(path) +/// |> or_fail_with("expected inspect snapshot match") +/// ``` +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(value)` produced by `should` (or a previous matcher) +/// - `snapshot_path`: file path used to store/compare the snapshot +/// +/// ## Returns +/// +/// A `MatchResult(value)` preserving the unwrapped value for further chaining. +pub const match_snapshot_inspect = snapshot.match_snapshot_inspect + +/// Delete a snapshot file. +/// +/// Use this to force regeneration of a snapshot on the next test run. 
+/// +/// ## Example +/// +/// ```gleam +/// let path = "./test/tmp/clear_snapshot_example.snap" +/// +/// // Setup: create a snapshot file (no assertions during setup) +/// use _ <- result.try( +/// file.write(path, "hello") |> result.map_error(file.error_to_string), +/// ) +/// +/// clear_snapshot(path) +/// |> should +/// |> be_equal(Ok(Nil)) +/// |> or_fail_with("expected clear_snapshot to succeed") +/// ``` +/// +/// ## Parameters +/// +/// - `snapshot_path`: the file path to delete +/// +/// ## Returns +/// +/// `Result(Nil, String)`: +/// - `Ok(Nil)` if the snapshot was deleted (or didn't exist) +/// - `Error(message)` if deletion failed +pub const clear_snapshot = snapshot.clear_snapshot + +/// Delete all snapshot files in a directory. +/// +/// Deletes all files with the `.snap` extension in the given directory. +/// +/// ## Example +/// +/// ```gleam +/// let directory = "./test/tmp/clear_snapshots_in_directory_example" +/// let a = directory <> "/a.snap" +/// let b = directory <> "/b.snap" +/// +/// // Setup: create two snapshot files (no assertions during setup) +/// use _ <- result.try( +/// file.write(a, "a") |> result.map_error(file.error_to_string), +/// ) +/// use _ <- result.try( +/// file.write(b, "b") |> result.map_error(file.error_to_string), +/// ) +/// +/// clear_snapshots_in_directory(directory) +/// |> should +/// |> be_equal(Ok(2)) +/// |> or_fail_with("expected two deleted snapshots") +/// ``` +/// +/// ## Parameters +/// +/// - `directory`: the directory to scan (non-recursively) +/// +/// ## Returns +/// +/// `Result(Int, String)`: +/// - `Ok(count)` with the number of `.snap` files deleted +/// - `Error(message)` if deletion failed +pub const clear_snapshots_in_directory = snapshot.clear_snapshots_in_directory + +// ============================================================================= +// Terminal Operations +// ============================================================================= + +/// Complete a matcher chain and 
provide a failure message. +/// +/// This is the **terminal operation** that ends every matcher chain. It +/// converts the `MatchResult` into an `AssertionResult` that the test runner +/// understands. +/// +/// If the matcher passed, returns `AssertionOk`. If it failed, returns +/// `AssertionFailed` with the provided message. +/// +/// ## Example +/// +/// ```gleam +/// 2 + 3 +/// |> should +/// |> be_equal(5) +/// |> or_fail_with("2 + 3 should equal 5") +/// ``` +/// +/// ## Parameters +/// +/// - `result`: the `MatchResult(a)` produced by `should` and matchers +/// - `message`: message to show if the chain failed +/// +/// ## Returns +/// +/// A `Result(AssertionResult, String)` so test bodies can return it directly: +/// +/// - `Ok(AssertionOk)` when the chain passed +/// - `Ok(AssertionFailed(...))` when the chain failed +/// +/// (This function currently never returns `Error`, but the `Result` shape keeps +/// test bodies uniform for `dream_test/unit`: `fn() { ... } -> Result(AssertionResult, String)`.) +/// +/// ## Writing Good Messages +/// +/// Good failure messages explain **what should have happened**: +/// - ✓ "User should be authenticated after login" +/// - ✓ "Cart total should include tax" +/// - ✗ "wrong" +/// - ✗ "failed" +/// +pub fn or_fail_with( + result result: MatchResult(a), + message message: String, +) -> Result(AssertionResult, String) { + Ok(or_fail_with_assertion(result, message)) +} + +fn or_fail_with_assertion( + result: MatchResult(a), + message: String, +) -> AssertionResult { + case result { + MatchOk(_) -> AssertionOk + MatchFailed(failure) -> + AssertionFailed(AssertionFailure(..failure, message: message)) + } +} + +/// Explicitly fail a test with a message. +/// +/// Use this when you need to fail a test in a conditional branch where +/// the normal matcher chain doesn't apply. 
+/// +/// ## Example +/// +/// ```gleam +/// Ok(case 1 + 1 { +/// 2 -> succeed() +/// _ -> fail_with("expected 1 + 1 to be 2") +/// }) +/// ``` +/// +/// ## When to Use +/// +/// - In `case` branches that represent unexpected states +/// - When testing that something does NOT happen +/// - As a placeholder for unimplemented test branches +/// +/// ## Returns +/// +/// An `AssertionResult` you can wrap in `Ok(...)` from a test body. +/// +/// If you want to abort a test immediately (rather than “failing a matcher”), +/// return `Error("...")` from the test body instead. +/// +pub fn fail_with(message message: String) -> AssertionResult { + AssertionFailed(AssertionFailure( + operator: "fail_with", + message: message, + payload: gleam_option.None, + )) +} + +/// Explicitly mark a matcher chain as successful. +/// +/// Use this when you need to explicitly succeed in a conditional branch, +/// as the counterpart to `fail_with`. +/// +/// ## Example +/// +/// ```gleam +/// Ok(case 1 + 1 { +/// 2 -> succeed() +/// _ -> fail_with("expected 1 + 1 to be 2") +/// }) +/// ``` +/// +/// ## When to Use +/// +/// - In `case` branches where success is the expected outcome +/// - When all branches of a case must return an `AssertionResult` +/// - To make intent explicit rather than relying on implicit success +/// +/// ## Returns +/// +/// `AssertionOk`. +/// +pub fn succeed() -> AssertionResult { + AssertionOk +} diff --git a/src/dream_test/matchers/boolean.gleam b/src/dream_test/matchers/boolean.gleam index 23a2e90..069015a 100644 --- a/src/dream_test/matchers/boolean.gleam +++ b/src/dream_test/matchers/boolean.gleam @@ -1,22 +1,17 @@ //// Boolean matchers for dream_test. //// -//// These matchers check boolean values. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers check boolean values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Usage +//// Use them in a matcher chain when you want to assert a boolean condition. 
//// -//// ```gleam -//// import dream_test/assertions/should.{should, be_true, be_false, or_fail_with} +//// ## Example //// -//// is_valid(input) -//// |> should() +//// ```gleam +//// True +//// |> should //// |> be_true() -//// |> or_fail_with("Input should be valid") -//// -//// is_empty(list) -//// |> should() -//// |> be_false() -//// |> or_fail_with("List should not be empty") +//// |> or_fail_with("expected True") //// ``` import dream_test/types.{ @@ -26,16 +21,29 @@ import gleam/option.{Some} /// Assert that a value is `True`. /// +/// Use this when your value is expected to be `True` and you want a useful +/// failure payload when it isn't. +/// /// ## Example /// /// ```gleam -/// is_valid(input) -/// |> should() +/// True +/// |> should /// |> be_true() -/// |> or_fail_with("Input should be valid") +/// |> or_fail_with("expected True") /// ``` /// -pub fn be_true(value_or_result: MatchResult(Bool)) -> MatchResult(Bool) { +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Bool)` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Bool)` preserving the boolean for further chaining. +/// +pub fn be_true( + value_or_result value_or_result: MatchResult(Bool), +) -> MatchResult(Bool) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_true(actual) @@ -59,16 +67,29 @@ fn check_is_true(actual: Bool) -> MatchResult(Bool) { /// Assert that a value is `False`. /// +/// Use this when your value is expected to be `False` and you want a useful +/// failure payload when it isn't. 
+/// /// ## Example /// /// ```gleam -/// is_empty(list) -/// |> should() +/// False +/// |> should /// |> be_false() -/// |> or_fail_with("List should not be empty") +/// |> or_fail_with("expected False") /// ``` /// -pub fn be_false(value_or_result: MatchResult(Bool)) -> MatchResult(Bool) { +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Bool)` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Bool)` preserving the boolean for further chaining. +/// +pub fn be_false( + value_or_result value_or_result: MatchResult(Bool), +) -> MatchResult(Bool) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_false(actual) diff --git a/src/dream_test/matchers/collection.gleam b/src/dream_test/matchers/collection.gleam index 31888f1..177c721 100644 --- a/src/dream_test/matchers/collection.gleam +++ b/src/dream_test/matchers/collection.gleam @@ -1,26 +1,18 @@ //// Collection matchers for dream_test. //// -//// These matchers work with lists. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers work with `List(a)` values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Usage +//// Use them to assert collection properties like length, emptiness, and +//// membership. 
//// -//// ```gleam -//// import dream_test/assertions/should.{ -//// should, contain, not_contain, have_length, be_empty, or_fail_with, -//// } -//// -//// // Check if list contains an item -//// users -//// |> should() -//// |> contain(alice) -//// |> or_fail_with("Users should include Alice") +//// ## Example //// -//// // Check list length -//// get_results() -//// |> should() +//// ```gleam +//// [1, 2, 3] +//// |> should //// |> have_length(3) -//// |> or_fail_with("Should have 3 results") +//// |> or_fail_with("expected list length 3") //// ``` import dream_test/types.{ @@ -33,18 +25,32 @@ import gleam/string /// Assert that a list contains a specific item. /// +/// Use this when you want to assert membership while preserving the original +/// list for further checks. +/// /// ## Example /// /// ```gleam /// [1, 2, 3] -/// |> should() +/// |> should /// |> contain(2) -/// |> or_fail_with("List should contain 2") +/// |> or_fail_with("expected list to contain 2") /// ``` /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `expected_item`: the item that must be present in the list +/// +/// ## Returns +/// +/// A `MatchResult(List(a))`: +/// - On success, preserves the list for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. +/// pub fn contain( - value_or_result: MatchResult(List(a)), - expected_item: a, + value_or_result value_or_result: MatchResult(List(a)), + expected_item expected_item: a, ) -> MatchResult(List(a)) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -77,18 +83,32 @@ fn check_contains( /// Assert that a list does not contain a specific item. /// +/// Use this when you want to assert absence while preserving the original list +/// for further checks. 
+/// /// ## Example /// /// ```gleam /// ["a", "b", "c"] -/// |> should() +/// |> should /// |> not_contain("d") -/// |> or_fail_with("List should not contain 'd'") +/// |> or_fail_with("expected list to not contain \"d\"") /// ``` /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `unexpected_item`: the item that must *not* be present in the list +/// +/// ## Returns +/// +/// A `MatchResult(List(a))`: +/// - On success, preserves the list for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. +/// pub fn not_contain( - value_or_result: MatchResult(List(a)), - unexpected_item: a, + value_or_result value_or_result: MatchResult(List(a)), + unexpected_item unexpected_item: a, ) -> MatchResult(List(a)) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -121,18 +141,32 @@ fn check_not_contains( /// Assert that a list has a specific length. /// +/// Use this when you need to assert exact list length while preserving the list +/// for further checks. +/// /// ## Example /// /// ```gleam -/// get_users() -/// |> should() +/// [1, 2, 3] +/// |> should /// |> have_length(3) -/// |> or_fail_with("Should have 3 users") +/// |> or_fail_with("expected list length 3") /// ``` /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// - `expected_length`: the exact length the list must have +/// +/// ## Returns +/// +/// A `MatchResult(List(a))`: +/// - On success, preserves the list for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. 
+/// pub fn have_length( - value_or_result: MatchResult(List(a)), - expected_length: Int, + value_or_result value_or_result: MatchResult(List(a)), + expected_length expected_length: Int, ) -> MatchResult(List(a)) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -167,16 +201,31 @@ fn check_length( /// Assert that a list is empty. /// +/// Use this when you want to assert there are no values while preserving the +/// list for further checks. +/// /// ## Example /// /// ```gleam -/// get_errors() -/// |> should() +/// [] +/// |> should /// |> be_empty() -/// |> or_fail_with("Should have no errors") +/// |> or_fail_with("expected empty list") /// ``` /// -pub fn be_empty(value_or_result: MatchResult(List(a))) -> MatchResult(List(a)) { +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(List(a))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(List(a))`: +/// - On success, preserves the list for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. +/// +pub fn be_empty( + value_or_result value_or_result: MatchResult(List(a)), +) -> MatchResult(List(a)) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual_list) -> check_is_empty(actual_list) diff --git a/src/dream_test/matchers/comparison.gleam b/src/dream_test/matchers/comparison.gleam index 7fbc24c..cbcb95e 100644 --- a/src/dream_test/matchers/comparison.gleam +++ b/src/dream_test/matchers/comparison.gleam @@ -1,34 +1,18 @@ //// Comparison matchers for dream_test. //// -//// These matchers compare numeric values. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers compare numeric values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Integer Matchers +//// Use them to assert ordering relationships (greater-than, less-than, in a +//// range, etc.) while preserving the numeric value for further chaining. 
//// -//// ```gleam -//// import dream_test/assertions/should.{ -//// should, be_greater_than, be_less_than, be_at_least, -//// be_at_most, be_between, be_in_range, or_fail_with, -//// } -//// -//// count -//// |> should() -//// |> be_greater_than(0) -//// |> or_fail_with("Count should be positive") -//// -//// score -//// |> should() -//// |> be_in_range(0, 100) -//// |> or_fail_with("Score should be 0-100") -//// ``` -//// -//// ## Float Matchers +//// ## Example //// //// ```gleam -//// average -//// |> should() -//// |> be_greater_than_float(0.0) -//// |> or_fail_with("Average should be positive") +//// 10 +//// |> should +//// |> be_greater_than(0) +//// |> or_fail_with("expected 10 to be greater than 0") //// ``` import dream_test/types.{ @@ -40,18 +24,27 @@ import gleam/option.{Some} /// Assert that an integer is greater than a threshold. /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual integer must be greater than +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// count_items() -/// |> should() +/// 10 +/// |> should /// |> be_greater_than(0) -/// |> or_fail_with("Should have at least one item") +/// |> or_fail_with("expected 10 to be greater than 0") /// ``` /// pub fn be_greater_than( - value_or_result: MatchResult(Int), - threshold: Int, + value_or_result value_or_result: MatchResult(Int), + threshold threshold: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -81,18 +74,27 @@ fn check_greater_than(actual: Int, threshold: Int) -> MatchResult(Int) { /// Assert that an integer is less than a threshold. 
/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual integer must be less than +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// response_time_ms -/// |> should() +/// 10 +/// |> should /// |> be_less_than(100) -/// |> or_fail_with("Response should be under 100ms") +/// |> or_fail_with("expected 10 to be less than 100") /// ``` /// pub fn be_less_than( - value_or_result: MatchResult(Int), - threshold: Int, + value_or_result value_or_result: MatchResult(Int), + threshold threshold: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -122,18 +124,27 @@ fn check_less_than(actual: Int, threshold: Int) -> MatchResult(Int) { /// Assert that an integer is at least a minimum value (>=). /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `minimum`: the minimum allowed value (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// user.age -/// |> should() -/// |> be_at_least(18) -/// |> or_fail_with("User must be at least 18") +/// 10 +/// |> should +/// |> be_at_least(10) +/// |> or_fail_with("expected 10 to be at least 10") /// ``` /// pub fn be_at_least( - value_or_result: MatchResult(Int), - minimum: Int, + value_or_result value_or_result: MatchResult(Int), + minimum minimum: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -163,18 +174,27 @@ fn check_at_least(actual: Int, minimum: Int) -> MatchResult(Int) { /// Assert that an integer is at most a maximum value (<=). 
/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `maximum`: the maximum allowed value (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// password.length -/// |> should() -/// |> be_at_most(128) -/// |> or_fail_with("Password must be at most 128 characters") +/// 10 +/// |> should +/// |> be_at_most(10) +/// |> or_fail_with("expected 10 to be at most 10") /// ``` /// pub fn be_at_most( - value_or_result: MatchResult(Int), - maximum: Int, + value_or_result value_or_result: MatchResult(Int), + maximum maximum: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -206,19 +226,29 @@ fn check_at_most(actual: Int, maximum: Int) -> MatchResult(Int) { /// /// The value must be strictly greater than `min` and strictly less than `max`. /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `min`: lower bound (exclusive) +/// - `max`: upper bound (exclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// port -/// |> should() -/// |> be_between(1024, 65535) -/// |> or_fail_with("Port must be between 1024 and 65535") +/// 5 +/// |> should +/// |> be_between(1, 10) +/// |> or_fail_with("expected 5 to be between 1 and 10") /// ``` /// pub fn be_between( - value_or_result: MatchResult(Int), - min: Int, - max: Int, + value_or_result value_or_result: MatchResult(Int), + min min: Int, + max max: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -250,19 +280,29 @@ fn check_between(actual: Int, min: Int, max: Int) -> MatchResult(Int) { /// /// The value must be >= `min` and <= `max`. 
/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Int)` produced by `should` (or a previous matcher) +/// - `min`: lower bound (inclusive) +/// - `max`: upper bound (inclusive) +/// +/// ## Returns +/// +/// A `MatchResult(Int)` preserving the integer for further chaining. +/// /// ## Example /// /// ```gleam -/// score -/// |> should() +/// 10 +/// |> should /// |> be_in_range(0, 100) -/// |> or_fail_with("Score must be 0-100") +/// |> or_fail_with("expected 10 to be in range 0..100") /// ``` /// pub fn be_in_range( - value_or_result: MatchResult(Int), - min: Int, - max: Int, + value_or_result value_or_result: MatchResult(Int), + min min: Int, + max max: Int, ) -> MatchResult(Int) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -292,18 +332,27 @@ fn check_in_range(actual: Int, min: Int, max: Int) -> MatchResult(Int) { /// Assert that a float is greater than a threshold. /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Float)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual float must be greater than +/// +/// ## Returns +/// +/// A `MatchResult(Float)` preserving the float for further chaining. +/// /// ## Example /// /// ```gleam -/// average -/// |> should() +/// 0.5 +/// |> should /// |> be_greater_than_float(0.0) -/// |> or_fail_with("Average should be positive") +/// |> or_fail_with("expected 0.5 to be greater than 0.0") /// ``` /// pub fn be_greater_than_float( - value_or_result: MatchResult(Float), - threshold: Float, + value_or_result value_or_result: MatchResult(Float), + threshold threshold: Float, ) -> MatchResult(Float) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -336,18 +385,27 @@ fn check_greater_than_float( /// Assert that a float is less than a threshold. 
/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Float)` produced by `should` (or a previous matcher) +/// - `threshold`: the value the actual float must be less than +/// +/// ## Returns +/// +/// A `MatchResult(Float)` preserving the float for further chaining. +/// /// ## Example /// /// ```gleam -/// error_rate -/// |> should() -/// |> be_less_than_float(0.01) -/// |> or_fail_with("Error rate should be under 1%") +/// 0.5 +/// |> should +/// |> be_less_than_float(1.0) +/// |> or_fail_with("expected 0.5 to be less than 1.0") /// ``` /// pub fn be_less_than_float( - value_or_result: MatchResult(Float), - threshold: Float, + value_or_result value_or_result: MatchResult(Float), + threshold threshold: Float, ) -> MatchResult(Float) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) diff --git a/src/dream_test/matchers/equality.gleam b/src/dream_test/matchers/equality.gleam index cd62334..18e3d3e 100644 --- a/src/dream_test/matchers/equality.gleam +++ b/src/dream_test/matchers/equality.gleam @@ -1,24 +1,15 @@ //// Equality matchers for dream_test. //// -//// These matchers compare values using Gleam's structural equality. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers compare values using Gleam's structural equality and are +//// re-exported through `dream_test/matchers`. 
//// -//// ## Usage +//// ## Example //// //// ```gleam -//// import dream_test/assertions/should.{should, equal, not_equal, or_fail_with} -//// -//// // Check equality -//// result -//// |> should() -//// |> equal(42) -//// |> or_fail_with("Should be 42") -//// -//// // Check inequality -//// result -//// |> should() -//// |> not_equal(0) -//// |> or_fail_with("Should not be zero") +//// 2 + 3 +//// |> should +//// |> be_equal(5) +//// |> or_fail_with("2 + 3 should equal 5") //// ``` import dream_test/types.{ @@ -35,13 +26,27 @@ import gleam/string /// ## Example /// /// ```gleam -/// add(2, 3) -/// |> should() -/// |> equal(5) +/// 2 + 3 +/// |> should +/// |> be_equal(5) /// |> or_fail_with("2 + 3 should equal 5") /// ``` /// -pub fn equal(value_or_result: MatchResult(a), expected: a) -> MatchResult(a) { +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(a)` produced by `should` (or a previous matcher) +/// - `expected`: the value you expect the actual value to equal +/// +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On success, preserves the original value for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. 
+/// +pub fn be_equal( + value_or_result value_or_result: MatchResult(a), + expected expected: a, +) -> MatchResult(a) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_equal(actual, expected) @@ -59,7 +64,7 @@ fn check_equal(actual: a, expected: a) -> MatchResult(a) { ) MatchFailed(AssertionFailure( - operator: "equal", + operator: "be_equal", message: "", payload: Some(payload), )) @@ -74,15 +79,26 @@ fn check_equal(actual: a, expected: a) -> MatchResult(a) { /// ## Example /// /// ```gleam -/// divide(10, 3) -/// |> should() +/// 10 + 3 +/// |> should /// |> not_equal(3) -/// |> or_fail_with("10/3 should not equal 3 exactly") +/// |> or_fail_with("10 + 3 should not equal 3") /// ``` /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(a)` produced by `should` (or a previous matcher) +/// - `unexpected`: the value you expect the actual value to *not* equal +/// +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On success, preserves the original value for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. +/// pub fn not_equal( - value_or_result: MatchResult(a), - unexpected: a, + value_or_result value_or_result: MatchResult(a), + unexpected unexpected: a, ) -> MatchResult(a) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) diff --git a/src/dream_test/matchers/option.gleam b/src/dream_test/matchers/option.gleam index 52f944e..b062ad6 100644 --- a/src/dream_test/matchers/option.gleam +++ b/src/dream_test/matchers/option.gleam @@ -1,22 +1,19 @@ //// Option matchers for dream_test. //// -//// These matchers work with `Option(a)` values and support chaining. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers work with `Option(a)` values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Chaining +//// `be_some()` unwraps `Some(value)` so you can keep matching on the inner +//// value. 
`be_none()` asserts the option is empty. //// -//// The `be_some` matcher extracts the inner value, allowing you to chain -//// additional matchers: +//// ## Example //// //// ```gleam -//// import dream_test/assertions/should.{should, be_some, equal, or_fail_with} -//// -//// // Check that it's Some, then check the inner value -//// find_user(id) -//// |> should() +//// Some(42) +//// |> should //// |> be_some() -//// |> equal(expected_user) -//// |> or_fail_with("Should find the expected user") +//// |> be_equal(42) +//// |> or_fail_with("expected Some(42)") //// ``` import dream_test/types.{ @@ -28,27 +25,31 @@ import gleam/string /// Assert that an `Option` is `Some` and extract its value. /// /// If the assertion passes, the inner value is passed to subsequent matchers. +/// This enables chaining like `be_some() |> be_equal(42)`. /// /// ## Example /// /// ```gleam -/// find_user(id) -/// |> should() +/// Some(42) +/// |> should /// |> be_some() -/// |> or_fail_with("User should exist") +/// |> be_equal(42) +/// |> or_fail_with("expected Some(42)") /// ``` /// -/// ## Chaining +/// ## Parameters /// -/// ```gleam -/// Some(42) -/// |> should() -/// |> be_some() -/// |> equal(42) -/// |> or_fail_with("Should be Some(42)") -/// ``` +/// - `value_or_result`: the `MatchResult(Option(a))` produced by `should` (or a previous matcher) /// -pub fn be_some(value_or_result: MatchResult(Option(a))) -> MatchResult(a) { +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On `Some(value)`, the chain continues with the unwrapped `value`. +/// - On `None`, the chain becomes failed and later matchers are skipped. 
+/// +pub fn be_some( + value_or_result value_or_result: MatchResult(Option(a)), +) -> MatchResult(a) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_some(actual) @@ -75,13 +76,23 @@ fn check_is_some(actual: Option(a)) -> MatchResult(a) { /// ## Example /// /// ```gleam -/// find_deleted_user(id) -/// |> should() +/// None +/// |> should /// |> be_none() -/// |> or_fail_with("Deleted user should not exist") +/// |> or_fail_with("expected None") /// ``` /// -pub fn be_none(value_or_result: MatchResult(Option(a))) -> MatchResult(Nil) { +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(Option(a))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(Nil)` that continues the chain with `Nil` on success. +/// +pub fn be_none( + value_or_result value_or_result: MatchResult(Option(a)), +) -> MatchResult(Nil) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_none(actual) diff --git a/src/dream_test/matchers/result.gleam b/src/dream_test/matchers/result.gleam index e88f692..072676a 100644 --- a/src/dream_test/matchers/result.gleam +++ b/src/dream_test/matchers/result.gleam @@ -1,29 +1,19 @@ //// Result matchers for dream_test. //// -//// These matchers work with `Result(a, e)` values and support chaining. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers work with `Result(a, e)` values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Chaining +//// `be_ok()` unwraps `Ok(value)` so you can keep matching on the inner value. +//// `be_error()` unwraps `Error(value)` so you can match on the error value. 
//// -//// Both `be_ok` and `be_error` extract their inner values, allowing you to -//// chain additional matchers: +//// ## Example //// //// ```gleam -//// import dream_test/assertions/should.{should, be_ok, be_error, equal, or_fail_with} -//// -//// // Check that it's Ok, then check the inner value -//// parse_int("42") -//// |> should() +//// Ok("hello") +//// |> should //// |> be_ok() -//// |> equal(42) -//// |> or_fail_with("Should parse to 42") -//// -//// // Check that it's Error, then check the error value -//// validate(invalid_input) -//// |> should() -//// |> be_error() -//// |> equal(ValidationError("email required")) -//// |> or_fail_with("Should fail with email error") +//// |> be_equal("hello") +//// |> or_fail_with("expected Ok(\"hello\")") //// ``` import dream_test/types.{ @@ -35,27 +25,31 @@ import gleam/string /// Assert that a `Result` is `Ok` and extract its value. /// /// If the assertion passes, the `Ok` value is passed to subsequent matchers. +/// This enables chaining like `be_ok() |> be_equal("...")`. /// /// ## Example /// /// ```gleam -/// parse_int("42") -/// |> should() +/// Ok("hello") +/// |> should /// |> be_ok() -/// |> or_fail_with("Should parse successfully") +/// |> be_equal("hello") +/// |> or_fail_with("expected Ok(\"hello\")") /// ``` /// -/// ## Chaining +/// ## Parameters /// -/// ```gleam -/// Ok("hello") -/// |> should() -/// |> be_ok() -/// |> equal("hello") -/// |> or_fail_with("Should be Ok with 'hello'") -/// ``` +/// - `value_or_result`: the `MatchResult(Result(a, e))` produced by `should` (or a previous matcher) /// -pub fn be_ok(value_or_result: MatchResult(Result(a, e))) -> MatchResult(a) { +/// ## Returns +/// +/// A `MatchResult(a)`: +/// - On `Ok(value)`, the chain continues with the unwrapped `value`. +/// - On `Error(_)`, the chain becomes failed and later matchers are skipped. 
+/// +pub fn be_ok( + value_or_result value_or_result: MatchResult(Result(a, e)), +) -> MatchResult(a) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_ok(actual) @@ -84,27 +78,31 @@ fn check_is_ok(actual: Result(a, e)) -> MatchResult(a) { /// Assert that a `Result` is `Error` and extract the error value. /// /// If the assertion passes, the error value is passed to subsequent matchers. +/// This enables chaining like `be_error() |> be_equal("...")`. /// /// ## Example /// /// ```gleam -/// parse_int("not a number") -/// |> should() +/// Error("nope") +/// |> should /// |> be_error() -/// |> or_fail_with("Should fail to parse") +/// |> be_equal("nope") +/// |> or_fail_with("expected Error(\"nope\")") /// ``` /// -/// ## Chaining +/// ## Parameters /// -/// ```gleam -/// Error("invalid") -/// |> should() -/// |> be_error() -/// |> equal("invalid") -/// |> or_fail_with("Should be Error with 'invalid'") -/// ``` +/// - `value_or_result`: the `MatchResult(Result(a, e))` produced by `should` (or a previous matcher) +/// +/// ## Returns +/// +/// A `MatchResult(e)`: +/// - On `Error(value)`, the chain continues with the unwrapped error `value`. +/// - On `Ok(_)`, the chain becomes failed and later matchers are skipped. /// -pub fn be_error(value_or_result: MatchResult(Result(a, e))) -> MatchResult(e) { +pub fn be_error( + value_or_result value_or_result: MatchResult(Result(a, e)), +) -> MatchResult(e) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) MatchOk(actual) -> check_is_error(actual) diff --git a/src/dream_test/matchers/snapshot.gleam b/src/dream_test/matchers/snapshot.gleam index c71a653..a05327a 100644 --- a/src/dream_test/matchers/snapshot.gleam +++ b/src/dream_test/matchers/snapshot.gleam @@ -1,96 +1,28 @@ //// Snapshot matchers for dream_test. //// -//// Snapshot testing compares a value against a stored "golden" file. -//// On first run, the snapshot is created automatically. 
On subsequent runs, -//// the value is compared against the stored snapshot—any difference is a failure. +//// Snapshot testing compares output against a stored “golden” file. //// -//// ## Why Snapshot Testing? +//// - On first run (missing snapshot), the snapshot is created and the test passes. +//// - On later runs, the output is compared against the file; differences fail. //// -//// Snapshot tests excel at detecting **unintended changes** in complex outputs: +//// Snapshot tests are useful when the output is large or awkward to specify by +//// hand (rendered HTML, JSON, error messages, logs, etc.). //// -//// - Rendered HTML/JSON/XML -//// - Error messages and logs -//// - Serialized data structures -//// - Any output where "expected" is hard to specify manually -//// -//// ## Basic Usage -//// -//// ```gleam -//// import dream_test/assertions/should.{should, match_snapshot, or_fail_with} -//// -//// it("renders user profile", fn() { -//// render_profile(user) -//// |> should() -//// |> match_snapshot("./test/snapshots/user_profile.snap") -//// |> or_fail_with("Profile should match snapshot") -//// }) -//// ``` -//// -//// ## How It Works -//// -//// | Scenario | Behavior | -//// |-----------------------|-------------------------------------------| -//// | Snapshot missing | Creates it, test **passes** | -//// | Snapshot matches | Test **passes** | -//// | Snapshot differs | Test **fails** with diff | -//// -//// ## Updating Snapshots -//// -//// When you intentionally change output, update snapshots by deleting them: -//// -//// ```bash -//// # Update one snapshot -//// rm ./test/snapshots/user_profile.snap -//// gleam test -//// -//// # Update all snapshots in a directory -//// rm ./test/snapshots/*.snap -//// gleam test -//// ``` -//// -//// Or use the helper functions: +//// ## Example //// //// ```gleam -//// // In a setup script or before tests -//// let _ = snapshot.clear_snapshot("./test/snapshots/user_profile.snap") -//// let _ = 
snapshot.clear_snapshots_in_directory("./test/snapshots") +//// let path = "./test/tmp/match_snapshot_example.snap" +//// "hello" +//// |> should +//// |> match_snapshot(path) +//// |> or_fail_with("expected snapshot match") //// ``` -//// -//// ## Snapshot File Organization -//// -//// Recommended structure: -//// -//// ```text -//// test/ -//// ├── snapshots/ -//// │ ├── api/ -//// │ │ ├── users_list.snap -//// │ │ └── user_detail.snap -//// │ └── components/ -//// │ ├── header.snap -//// │ └── footer.snap -//// └── my_test.gleam -//// ``` -//// -//// Use descriptive paths that mirror your test structure. -//// -//// ## Testing Non-Strings -//// -//// For complex data structures, use `match_snapshot_inspect`: -//// -//// ```gleam -//// build_complex_result() -//// |> should() -//// |> match_snapshot_inspect("./test/snapshots/complex.snap") -//// |> or_fail_with("Result should match snapshot") -//// ``` -//// -//// This uses Gleam's `string.inspect` to serialize the value. import dream_test/file import dream_test/types.{ type MatchResult, AssertionFailure, MatchFailed, MatchOk, SnapshotFailure, } +import gleam/list import gleam/option.{Some} import gleam/string @@ -114,55 +46,25 @@ import gleam/string /// - `value_or_result` - The `MatchResult(String)` from the assertion chain /// - `snapshot_path` - Path to the snapshot file (will be created if missing) /// -/// ## Examples -/// -/// ### Basic Usage +/// ## Example /// /// ```gleam -/// it("serializes user to JSON", fn() { -/// user_to_json(sample_user) -/// |> should() -/// |> match_snapshot("./test/snapshots/user.json") -/// |> or_fail_with("User JSON should match snapshot") -/// }) +/// let path = "./test/tmp/match_snapshot_example.snap" +/// "hello" +/// |> should +/// |> match_snapshot(path) +/// |> or_fail_with("expected snapshot match") /// ``` /// -/// ### With Transformation -/// -/// ```gleam -/// it("renders HTML correctly", fn() { -/// render_page(data) -/// |> string.trim() // Normalize whitespace 
-/// |> should() -/// |> match_snapshot("./test/snapshots/page.html") -/// |> or_fail_with("Page HTML should match snapshot") -/// }) -/// ``` -/// -/// ### Error Handling -/// -/// ```gleam -/// it("handles parse errors gracefully", fn() { -/// case parse(invalid_input) { -/// Ok(_) -> fail("Should have failed") -/// Error(msg) -> -/// msg -/// |> should() -/// |> match_snapshot("./test/snapshots/parse_error.snap") -/// |> or_fail_with("Error message should match snapshot") -/// } -/// }) -/// ``` -/// -/// ## Updating the Snapshot +/// ## Returns /// -/// ```bash -/// rm ./test/snapshots/user.json && gleam test -/// ``` +/// A `MatchResult(String)`: +/// - On success, preserves the original string for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. /// pub fn match_snapshot( - value_or_result: MatchResult(String), - snapshot_path: String, + value_or_result value_or_result: MatchResult(String), + snapshot_path snapshot_path: String, ) -> MatchResult(String) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -187,52 +89,25 @@ pub fn match_snapshot( /// - `value_or_result` - The `MatchResult(value)` from the assertion chain /// - `snapshot_path` - Path to the snapshot file /// -/// ## Examples -/// -/// ### Testing a Record +/// ## Example /// /// ```gleam -/// it("parses config correctly", fn() { -/// parse_config(raw_toml) -/// |> should() -/// |> match_snapshot_inspect("./test/snapshots/config.snap") -/// |> or_fail_with("Parsed config should match snapshot") -/// }) -/// ``` -/// -/// ### Testing a List -/// -/// ```gleam -/// it("filters users correctly", fn() { -/// users -/// |> list.filter(is_active) -/// |> should() -/// |> match_snapshot_inspect("./test/snapshots/active_users.snap") -/// |> or_fail_with("Active users should match snapshot") -/// }) -/// ``` -/// -/// ## Snapshot Format -/// -/// The snapshot will contain the Gleam debug representation: -/// -/// ```text -/// User(name: 
"Alice", age: 30, active: True) -/// ``` -/// -/// ```text -/// [User(name: "Alice", age: 30), User(name: "Bob", age: 25)] +/// let path = "./test/tmp/match_snapshot_inspect_example.snap" +/// Some(1) +/// |> should +/// |> match_snapshot_inspect(path) +/// |> or_fail_with("expected inspect snapshot match") /// ``` /// -/// ## Note on Stability +/// ## Returns /// -/// The `string.inspect` output may change between Gleam versions. -/// If you need stable serialization, convert to JSON or another format -/// and use `match_snapshot` instead. +/// A `MatchResult(value)`: +/// - On success, preserves the original value for further chaining. +/// - On failure, the chain becomes failed and later matchers are skipped. /// pub fn match_snapshot_inspect( - value_or_result: MatchResult(value), - snapshot_path: String, + value_or_result value_or_result: MatchResult(value), + snapshot_path snapshot_path: String, ) -> MatchResult(value) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -258,35 +133,20 @@ pub fn match_snapshot_inspect( /// - `Ok(Nil)` - Snapshot was deleted (or didn't exist) /// - `Error(String)` - Human-readable error message /// -/// ## Examples -/// -/// ### Clear Before Test +/// ## Example /// /// ```gleam -/// // In a test setup -/// let _ = snapshot.clear_snapshot("./test/snapshots/user.snap") -/// // Next test will create a fresh snapshot -/// ``` +/// let path = "./test/tmp/clear_snapshot_example.snap" /// -/// ### Conditional Update +/// // Setup: create a snapshot file (no assertions during setup) +/// use _ <- result.try( +/// file.write(path, "hello") |> result.map_error(file.error_to_string), +/// ) /// -/// ```gleam -/// case env.get("UPDATE_SNAPSHOTS") { -/// Ok("true") -> { -/// let _ = snapshot.clear_snapshot("./test/snapshots/output.snap") -/// } -/// _ -> Nil -/// } -/// ``` -/// -/// ## Idempotent Behavior -/// -/// This function succeeds even if the file doesn't exist: -/// -/// ```gleam -/// // Both calls succeed 
-/// let _ = snapshot.clear_snapshot("./test/snapshots/new.snap") -/// let _ = snapshot.clear_snapshot("./test/snapshots/new.snap") +/// clear_snapshot(path) +/// |> should +/// |> be_equal(Ok(Nil)) +/// |> or_fail_with("expected clear_snapshot to succeed") /// ``` /// pub fn clear_snapshot(snapshot_path: String) -> Result(Nil, String) { @@ -310,31 +170,27 @@ pub fn clear_snapshot(snapshot_path: String) -> Result(Nil, String) { /// - `Ok(Int)` - Number of snapshot files deleted /// - `Error(String)` - Human-readable error message /// -/// ## Examples -/// -/// ### Clear All Snapshots -/// -/// ```gleam -/// case snapshot.clear_snapshots_in_directory("./test/snapshots") { -/// Ok(0) -> io.println("No snapshots to clear") -/// Ok(n) -> io.println("Cleared " <> int.to_string(n) <> " snapshots") -/// Error(msg) -> io.println("Error: " <> msg) -/// } -/// ``` -/// -/// ### Clear Subdirectory +/// ## Example /// /// ```gleam -/// // Clear only API snapshots -/// let _ = snapshot.clear_snapshots_in_directory("./test/snapshots/api") +/// let directory = "./test/tmp/clear_snapshots_in_directory_example" +/// let a = directory <> "/a.snap" +/// let b = directory <> "/b.snap" +/// +/// // Setup: create two snapshot files (no assertions during setup) +/// use _ <- result.try( +/// file.write(a, "a") |> result.map_error(file.error_to_string), +/// ) +/// use _ <- result.try( +/// file.write(b, "b") |> result.map_error(file.error_to_string), +/// ) +/// +/// clear_snapshots_in_directory(directory) +/// |> should +/// |> be_equal(Ok(2)) +/// |> or_fail_with("expected two deleted snapshots") /// ``` /// -/// ## Notes -/// -/// - Only deletes files with `.snap` extension -/// - Does **not** recurse into subdirectories -/// - Non-snapshot files are left untouched -/// pub fn clear_snapshots_in_directory(directory: String) -> Result(Int, String) { case file.delete_files_matching(directory, ".snap") { Ok(count) -> Ok(count) @@ -348,18 +204,43 @@ pub fn 
clear_snapshots_in_directory(directory: String) -> Result(Int, String) { fn check_snapshot(actual: String, snapshot_path: String) -> MatchResult(String) { case file.read(snapshot_path) { - Ok(expected) -> compare_snapshot(actual, expected, snapshot_path) + Ok(expected) -> + compare_snapshot( + actual, + normalize_snapshot_expected(expected), + snapshot_path, + ) Error(file.NotFound(_)) -> create_snapshot(actual, snapshot_path) Error(error) -> make_read_error_failure(snapshot_path, error) } } +fn normalize_snapshot_expected(expected: String) -> String { + expected + |> string.to_graphemes + |> list.reverse + |> drop_trailing_newlines + |> list.reverse + |> string.join("") +} + +fn drop_trailing_newlines(graphemes_reversed: List(String)) -> List(String) { + case graphemes_reversed { + ["\n", ..rest] -> drop_trailing_newlines(rest) + ["\r", ..rest] -> drop_trailing_newlines(rest) + other -> other + } +} + fn compare_snapshot( actual: String, expected: String, snapshot_path: String, ) -> MatchResult(String) { - case actual == expected { + // Normalize both sides so trailing newlines in either the snapshot file + // or the produced output don't create noisy, meaningless mismatches. 
+ let normalized_actual = normalize_snapshot_expected(actual) + case normalized_actual == expected { True -> MatchOk(actual) False -> make_mismatch_failure(actual, expected, snapshot_path) } @@ -434,7 +315,12 @@ fn check_snapshot_inspect( let serialized = string.inspect(value) case file.read(snapshot_path) { Ok(expected) -> - compare_snapshot_inspect(value, serialized, expected, snapshot_path) + compare_snapshot_inspect( + value, + serialized, + normalize_snapshot_expected(expected), + snapshot_path, + ) Error(file.NotFound(_)) -> create_snapshot_inspect(value, serialized, snapshot_path) Error(error) -> make_read_error_failure_inspect(snapshot_path, error) diff --git a/src/dream_test/matchers/string.gleam b/src/dream_test/matchers/string.gleam index e2548ca..621378f 100644 --- a/src/dream_test/matchers/string.gleam +++ b/src/dream_test/matchers/string.gleam @@ -1,51 +1,50 @@ //// String matchers for dream_test. //// -//// These matchers work with strings. -//// They're re-exported through `dream_test/assertions/should`. +//// These matchers work with `String` values and are re-exported through +//// `dream_test/matchers`. //// -//// ## Usage +//// Use them to assert string structure (prefix/suffix/substring) while +//// preserving the original string for further chaining. 
//// -//// ```gleam -//// import dream_test/assertions/should.{ -//// should, start_with, end_with, contain_string, or_fail_with, -//// } -//// -//// greeting -//// |> should() -//// |> start_with("Hello") -//// |> or_fail_with("Greeting should start with Hello") +//// ## Example //// -//// filename -//// |> should() -//// |> end_with(".gleam") -//// |> or_fail_with("Should be a Gleam file") -//// -//// log_message -//// |> should() -//// |> contain_string("error") -//// |> or_fail_with("Log should mention error") +//// ```gleam +//// "hello world" +//// |> should +//// |> start_with("hello") +//// |> or_fail_with("expected string to start with \"hello\"") //// ``` import dream_test/types.{ type MatchResult, AssertionFailure, MatchFailed, MatchOk, StringMatchFailure, } -import gleam/option.{Some} +import gleam/option.{None, Some} +import gleam/regexp import gleam/string /// Assert that a string starts with a prefix. /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `prefix`: required starting substring +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +/// /// ## Example /// /// ```gleam -/// greeting -/// |> should() -/// |> start_with("Hello") -/// |> or_fail_with("Greeting should start with Hello") +/// "hello world" +/// |> should +/// |> start_with("hello") +/// |> or_fail_with("expected string to start with \"hello\"") /// ``` /// pub fn start_with( - value_or_result: MatchResult(String), - prefix: String, + value_or_result value_or_result: MatchResult(String), + prefix prefix: String, ) -> MatchResult(String) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -75,18 +74,27 @@ fn check_starts_with(actual: String, prefix: String) -> MatchResult(String) { /// Assert that a string ends with a suffix. 
/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `suffix`: required ending substring +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +/// /// ## Example /// /// ```gleam -/// filename -/// |> should() +/// "hello.gleam" +/// |> should /// |> end_with(".gleam") -/// |> or_fail_with("File should be a Gleam file") +/// |> or_fail_with("expected .gleam suffix") /// ``` /// pub fn end_with( - value_or_result: MatchResult(String), - suffix: String, + value_or_result value_or_result: MatchResult(String), + suffix suffix: String, ) -> MatchResult(String) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -116,18 +124,27 @@ fn check_ends_with(actual: String, suffix: String) -> MatchResult(String) { /// Assert that a string contains a substring. /// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `substring`: required substring that must be present +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +/// /// ## Example /// /// ```gleam -/// log_message -/// |> should() -/// |> contain_string("error") -/// |> or_fail_with("Log should mention error") +/// "hello world" +/// |> should +/// |> contain_string("world") +/// |> or_fail_with("expected substring match") /// ``` /// pub fn contain_string( - value_or_result: MatchResult(String), - substring: String, + value_or_result value_or_result: MatchResult(String), + substring substring: String, ) -> MatchResult(String) { case value_or_result { MatchFailed(failure) -> MatchFailed(failure) @@ -157,3 +174,95 @@ fn check_contains_string( } } } + +/// Assert that a string matches a regular expression. +/// +/// The regex pattern is compiled using `gleam/regexp.from_string`. 
+/// +/// The assertion passes if the pattern matches **anywhere** within the string +/// (it is not implicitly anchored). Use `^...$` if you want to require a full +/// string match. +/// +/// If the pattern is invalid, the matcher fails (with an error message, and no +/// structured payload). +/// +/// ## Parameters +/// +/// - `value_or_result`: the `MatchResult(String)` produced by `should` (or a previous matcher) +/// - `pattern`: the regular expression pattern string +/// +/// ## Returns +/// +/// A `MatchResult(String)` preserving the string for further chaining. +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/matchers.{match_regex, or_fail_with, should} +/// +/// "user-123" +/// |> should +/// |> match_regex("^user-\\d+$") +/// |> or_fail_with("expected an id like user-123") +/// ``` +pub fn match_regex( + value_or_result value_or_result: MatchResult(String), + pattern pattern: String, +) -> MatchResult(String) { + case value_or_result { + MatchFailed(failure) -> MatchFailed(failure) + MatchOk(actual) -> check_matches_regex(actual, pattern) + } +} + +fn check_matches_regex(actual: String, pattern: String) -> MatchResult(String) { + case regexp.from_string(pattern) { + Ok(compiled_regexp) -> + check_matches_compiled_regex(actual, pattern, compiled_regexp) + Error(compile_error) -> + invalid_regex_pattern_failure(pattern, compile_error) + } +} + +fn check_matches_compiled_regex( + actual: String, + pattern: String, + compiled_regexp: regexp.Regexp, +) -> MatchResult(String) { + case regexp.check(compiled_regexp, actual) { + True -> MatchOk(actual) + False -> regex_no_match_failure(actual, pattern) + } +} + +fn regex_no_match_failure( + actual: String, + pattern: String, +) -> MatchResult(String) { + let payload = + StringMatchFailure( + actual: actual, + pattern: pattern, + operation: "match_regex", + ) + + MatchFailed(AssertionFailure( + operator: "match_regex", + message: "", + payload: Some(payload), + )) +} + +fn 
invalid_regex_pattern_failure( + pattern: String, + compile_error: regexp.CompileError, +) -> MatchResult(String) { + MatchFailed(AssertionFailure( + operator: "match_regex", + message: "invalid regex pattern: " + <> string.inspect(pattern) + <> " " + <> string.inspect(compile_error), + payload: None, + )) +} diff --git a/src/dream_test/parallel.gleam b/src/dream_test/parallel.gleam index 7eba330..c2cc47a 100644 --- a/src/dream_test/parallel.gleam +++ b/src/dream_test/parallel.gleam @@ -1,702 +1,1860 @@ -/// Parallel test execution with configurable concurrency. -/// -/// This module provides concurrent test execution while maintaining -/// deterministic result ordering. Tests run in isolated processes -/// using the sandbox module. +//// Unified Root/Node execution engine (parallel tests, sequential groups). +//// +//// This module executes `types.TestSuite(context)` which is an alias of +//// `types.Root(context)` in the unified tree model. +//// +//// NOTE: This module is intentionally event-agnostic; `runner` composes it +//// with reporters for live output. +//// +//// Most users should not call this module directly—prefer `dream_test/runner`. +//// This module is public so advanced users can embed the executor in other +//// tooling (custom runners, IDE integrations, etc.). 
+//// +//// ## Example +//// +//// ```gleam +//// import dream_test/matchers.{have_length, or_fail_with, should, succeed} +//// import dream_test/parallel +//// import dream_test/unit.{describe, it} +//// +//// pub fn tests() { +//// describe("Parallel executor", [ +//// it("can run a suite and return a list of results", fn() { +//// let suite = +//// describe("Suite", [ +//// it("a", fn() { Ok(succeed()) }), +//// it("b", fn() { Ok(succeed()) }), +//// ]) +//// +//// parallel.run_root_parallel(parallel.default_config(), suite) +//// |> should +//// |> have_length(2) +//// |> or_fail_with("expected two results") +//// }), +//// ]) +//// } +//// ``` + +import dream_test/reporters/progress +import dream_test/reporters/types as reporter_types +import dream_test/sandbox import dream_test/timing import dream_test/types.{ - type AssertionResult, type SingleTestConfig, type TestCase, type TestResult, - type TestSuite, type TestSuiteItem, AssertionFailed, AssertionOk, - AssertionSkipped, Failed, SetupFailed, Skipped, SuiteGroup, SuiteTest, - TestCase, TestResult, TimedOut, + type AssertionFailure, type AssertionResult, type Node, type Status, + type TestKind, type TestResult, type TestSuite, AfterAll, AfterEach, + AssertionFailed, AssertionFailure, AssertionOk, AssertionSkipped, BeforeAll, + BeforeEach, Failed, Group, Passed, Root, SetupFailed, Skipped, Test, + TestResult, TimedOut, Unit, } import gleam/erlang/process.{ - type Pid, type Selector, type Subject, kill, monitor, new_selector, - new_subject, select, selector_receive, send, spawn_unlinked, + type Pid, type Subject, kill, new_selector, new_subject, select, + selector_receive, send, spawn_unlinked, } import gleam/list import gleam/option.{type Option, None, Some} -import gleam/order import gleam/string -/// Configuration for parallel test execution. +/// Configuration for the parallel executor. 
+/// +/// Most users should configure execution via `dream_test/runner` instead of +/// calling `dream_test/parallel` directly. +/// +/// - `max_concurrency`: how many tests may run at once +/// - `default_timeout_ms`: default per-test timeout (used when a test doesn’t +/// specify its own timeout) +/// +/// ## Fields +/// +/// - `max_concurrency`: maximum number of tests to run concurrently within a group +/// - `default_timeout_ms`: timeout used when a test does not set its own timeout +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/matchers.{have_length, or_fail_with, should, succeed} +/// import dream_test/parallel.{ParallelConfig} +/// import dream_test/unit.{describe, it} +/// +/// pub fn tests() { +/// let config = ParallelConfig(max_concurrency: 2, default_timeout_ms: 1000) +/// +/// describe("ParallelConfig", [ +/// it("can be constructed to customize execution", fn() { +/// let suite = +/// describe("Suite", [ +/// it("a", fn() { Ok(succeed()) }), +/// ]) +/// +/// parallel.run_root_parallel(config, suite) +/// |> should +/// |> have_length(1) +/// |> or_fail_with("expected one result") +/// }), +/// ]) +/// } +/// ``` pub type ParallelConfig { ParallelConfig(max_concurrency: Int, default_timeout_ms: Int) } -/// Default parallel configuration: 4 concurrent tests, 5 second timeout. -pub fn default_config() -> ParallelConfig { - ParallelConfig(max_concurrency: 4, default_timeout_ms: 5000) +/// Configuration for `run_root_parallel_with_reporter`. +/// +/// This is an advanced API intended for building custom runners that want to +/// drive a reporter while executing a single suite. +pub type RunRootParallelWithReporterConfig(context) { + RunRootParallelWithReporterConfig( + config: ParallelConfig, + suite: TestSuite(context), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + ) } -/// Internal state for tracking parallel execution. 
-type ExecutionState { - ExecutionState( - pending: List(IndexedTest), - running: List(RunningTest), - completed: List(IndexedResult), - results_subject: Subject(WorkerResult), +/// Result of `run_root_parallel_with_reporter`. +/// +/// This is returned as a semantic type (not a tuple) so callers can use named +/// fields and avoid positional destructuring. +pub type RunRootParallelWithReporterResult { + RunRootParallelWithReporterResult( + results: List(TestResult), + completed: Int, + progress_reporter: Option(progress.ProgressReporter), + ) +} + +type ExecuteNodeConfig(context) { + ExecuteNodeConfig( config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + inherited_before_each: List(fn(context) -> Result(context, String)), + inherited_after_each: List(fn(context) -> Result(Nil, String)), + node: Node(context), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + results_rev: List(TestResult), ) } -/// A test paired with its original index for ordering. -type IndexedTest { - IndexedTest(index: Int, test_case: TestCase) +type IndexedResult { + IndexedResult(index: Int, result: TestResult) } -/// A running test with its worker info. type RunningTest { RunningTest( index: Int, - test_case: TestCase, - worker_pid: Pid, - worker_monitor: process.Monitor, + pid: Pid, + deadline_ms: Int, + name: String, + full_name: List(String), + tags: List(String), + kind: TestKind, ) } -/// A completed result paired with its original index. 
-type IndexedResult { - IndexedResult(index: Int, result: TestResult) +type RunParallelLoopConfig(context) { + RunParallelLoopConfig( + config: ParallelConfig, + subject: Subject(WorkerMessage), + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + failures_rev: List(AssertionFailure), + pending: List(#(Int, Node(context))), + running: List(RunningTest), + pending_emit: List(IndexedResult), + emitted_rev: List(TestResult), + next_emit_index: Int, + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + max_concurrency: Int, + ) +} + +type StartWorkersUpToLimitConfig(context) { + StartWorkersUpToLimitConfig( + config: ParallelConfig, + subject: Subject(WorkerMessage), + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + failures_rev: List(AssertionFailure), + pending: List(#(Int, Node(context))), + running: List(RunningTest), + max_concurrency: Int, + ) +} + +type WaitForEventOrTimeoutConfig(context) { + WaitForEventOrTimeoutConfig( + config: ParallelConfig, + subject: Subject(WorkerMessage), + scope: List(String), + pending: List(#(Int, Node(context))), + running: List(RunningTest), + pending_emit: List(IndexedResult), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + ) } -/// Message from a worker process. -type WorkerResult { - WorkerCompleted(index: Int, test_run_result: TestRunResult, duration_ms: Int) - WorkerDown(index: Int, reason: String) +type WorkerMessage { + WorkerCompleted(index: Int, result: TestResult) + WorkerCrashed(index: Int, reason: String) } -/// Run tests in parallel with the given configuration. 
+@external(erlang, "sandbox_ffi", "run_catching") +fn run_catching(fn_to_run: fn() -> a) -> Result(a, String) + +/// Default executor configuration. /// -/// Tests are executed concurrently up to max_concurrency. -/// Results are returned in the same order as the input tests. -pub fn run_parallel( - config: ParallelConfig, - test_cases: List(TestCase), +/// Prefer configuring these values via `dream_test/runner` unless you are using +/// the executor directly. +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/matchers.{have_length, or_fail_with, should, succeed} +/// import dream_test/parallel +/// import dream_test/unit.{describe, it} +/// +/// pub fn tests() { +/// describe("Parallel executor", [ +/// it("can run a suite and return a list of results", fn() { +/// let suite = +/// describe("Suite", [ +/// it("a", fn() { Ok(succeed()) }), +/// it("b", fn() { Ok(succeed()) }), +/// ]) +/// +/// parallel.run_root_parallel(parallel.default_config(), suite) +/// |> should +/// |> have_length(2) +/// |> or_fail_with("expected two results") +/// }), +/// ]) +/// } +/// ``` +/// +/// ## Parameters +/// +/// None. +/// +/// ## Returns +/// +/// A `ParallelConfig` with sensible defaults. +pub fn default_config() -> ParallelConfig { + ParallelConfig(max_concurrency: 4, default_timeout_ms: 5000) +} + +/// Run a single suite and return results. +/// +/// This does **not** drive a reporter. 
+/// +/// ## Example +/// +/// ```gleam +/// import dream_test/matchers.{have_length, or_fail_with, should, succeed} +/// import dream_test/parallel +/// import dream_test/unit.{describe, it} +/// +/// pub fn tests() { +/// describe("Parallel executor", [ +/// it("can run a suite and return a list of results", fn() { +/// let suite = +/// describe("Suite", [ +/// it("a", fn() { Ok(succeed()) }), +/// it("b", fn() { Ok(succeed()) }), +/// ]) +/// +/// parallel.run_root_parallel(parallel.default_config(), suite) +/// |> should +/// |> have_length(2) +/// |> or_fail_with("expected two results") +/// }), +/// ]) +/// } +/// ``` +/// +/// ## Parameters +/// +/// - `config`: execution settings (concurrency + default timeout) +/// - `suite`: the suite to execute +/// +/// ## Returns +/// +/// A list of `TestResult` values, in deterministic (declaration) order. +pub fn run_root_parallel( + config config: ParallelConfig, + suite suite: TestSuite(context), ) -> List(TestResult) { - let indexed_tests = index_tests(test_cases, 0, []) - let results_subject = new_subject() - - let initial_state = - ExecutionState( - pending: indexed_tests, - running: [], - completed: [], - results_subject: results_subject, + let Root(seed, tree) = suite + let executor_input = + ExecuteNodeConfig( config: config, + scope: [], + inherited_tags: [], + context: seed, + inherited_before_each: [], + inherited_after_each: [], + node: tree, + progress_reporter: None, + write: discard_write, + total: 0, + completed: 0, + results_rev: [], ) - let final_state = execute_loop(initial_state) - sort_results(final_state.completed) + let #(results_rev, _completed) = execute_node(executor_input) + list.reverse(results_rev) } -/// Add indices to tests for ordering. 
-fn index_tests( - tests: List(TestCase), - index: Int, - accumulated: List(IndexedTest), -) -> List(IndexedTest) { - case tests { - [] -> list.reverse(accumulated) - [head, ..tail] -> { - let indexed = IndexedTest(index: index, test_case: head) - index_tests(tail, index + 1, [indexed, ..accumulated]) - } - } +fn discard_write(_text: String) -> Nil { + Nil } -/// Main execution loop. -fn execute_loop(state: ExecutionState) -> ExecutionState { - let state_with_workers = start_workers_up_to_limit(state) +/// Run a single suite while driving a reporter. +/// +/// This is used by `dream_test/runner` internally. +/// +/// - `total` is the total number of tests across all suites in the run +/// - `completed` is how many tests have already completed before this suite +/// +/// ## Example +/// +/// ```gleam +/// import dream_test/matchers.{succeed} +/// import dream_test/parallel +/// import dream_test/types.{type TestSuite} +/// import dream_test/unit.{describe, it} +/// import gleam/option.{None} +/// +/// pub fn suite() -> TestSuite(Nil) { +/// describe("suite", [ +/// it("passes", fn() { Ok(succeed()) }), +/// ]) +/// } +/// +/// fn ignore(_text: String) { +/// Nil +/// } +/// +/// pub fn main() { +/// let total = 1 +/// let completed = 0 +/// let parallel_result = +/// parallel.run_root_parallel_with_reporter( +/// parallel.RunRootParallelWithReporterConfig( +/// config: parallel.default_config(), +/// suite: suite(), +/// progress_reporter: None, +/// write: ignore, +/// total: total, +/// completed: completed, +/// ), +/// ) +/// let parallel.RunRootParallelWithReporterResult( +/// results: results, +/// completed: completed_after_suite, +/// progress_reporter: _progress_reporter, +/// ) = parallel_result +/// results +/// } +/// ``` +/// +/// ## Parameters +/// +/// - `config`: execution settings for this suite +/// - `suite`: the suite to execute +/// - `progress_reporter`: optional progress reporter state (typically `Some(progress.new())`) +/// - `write`: output 
sink for any progress output (typically `io.print`) +/// - `total`: total number of tests in the overall run (across all suites) +/// - `completed`: number of tests already completed before this suite starts +/// +/// ## Returns +/// +/// A `RunRootParallelWithReporterResult` containing: +/// - `results`: this suite’s results, in deterministic (declaration) order +/// - `completed`: updated completed count after driving `TestFinished` events +/// - `progress_reporter`: progress reporter state (unchanged for the built-in progress reporter) +pub fn run_root_parallel_with_reporter( + config config: RunRootParallelWithReporterConfig(context), +) -> RunRootParallelWithReporterResult { + let RunRootParallelWithReporterConfig( + config: executor_config, + suite: suite, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + ) = config + + let Root(seed, tree) = suite + let executor_input = + ExecuteNodeConfig( + config: executor_config, + scope: [], + inherited_tags: [], + context: seed, + inherited_before_each: [], + inherited_after_each: [], + node: tree, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + results_rev: [], + ) - case is_execution_complete(state_with_workers) { - True -> state_with_workers - False -> { - let updated_state = wait_for_result(state_with_workers) - execute_loop(updated_state) - } - } + let #(results_rev, completed_after_suite) = execute_node(executor_input) + RunRootParallelWithReporterResult( + results: list.reverse(results_rev), + completed: completed_after_suite, + progress_reporter: progress_reporter, + ) } -/// Check if all tests are complete. 
-fn is_execution_complete(state: ExecutionState) -> Bool { - list.is_empty(state.pending) && list.is_empty(state.running) -} +// ============================================================================= +// Execution (sequential groups, tests executed with sandbox + timeout) +// ============================================================================= -/// Start workers up to the concurrency limit. -fn start_workers_up_to_limit(state: ExecutionState) -> ExecutionState { - let current_running = list.length(state.running) - let slots_available = state.config.max_concurrency - current_running +fn execute_node( + executor_input: ExecuteNodeConfig(context), +) -> #(List(TestResult), Int) { + let ExecuteNodeConfig( + config: config, + scope: scope, + inherited_tags: inherited_tags, + context: context, + inherited_before_each: inherited_before_each, + inherited_after_each: inherited_after_each, + node: node, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + results_rev: base_results_rev, + ) = executor_input + + case node { + Group(name, tags, children) -> { + let group_scope = list.append(scope, [name]) + let combined_tags = list.append(inherited_tags, tags) + let empty_hooks = + GroupHooks( + before_all: [], + before_each: [], + after_each: [], + after_all: [], + ) + let #(hooks, tests, groups) = + collect_children(children, empty_hooks, [], []) + let #(ctx2, _before_each2, _after_each2, failures_rev) = + run_before_all_chain( + config, + group_scope, + context, + hooks.before_all, + inherited_before_each, + inherited_after_each, + [], + ) - case slots_available > 0 && !list.is_empty(state.pending) { - False -> state - True -> { - let state_with_worker = start_next_worker(state) - start_workers_up_to_limit(state_with_worker) + case list.is_empty(failures_rev) { + False -> { + // If before_all fails, do not execute any tests in this scope. + // Instead, mark all tests under this group as failed and skip bodies. 
+ let group_results_rev = + fail_tests_due_to_before_all( + group_scope, + combined_tags, + tests, + groups, + failures_rev, + [], + ) + // These tests were never spawned as workers, so we must still emit + // progress events for them to keep the progress bar in sync with + // `total`. + let completed_after_failures = + emit_test_finished_progress_results( + progress_reporter, + write, + completed, + total, + list.reverse(group_results_rev), + ) + + let final_rev = + run_after_all_chain( + config, + group_scope, + ctx2, + hooks.after_all, + group_results_rev, + ) + + #(list.append(final_rev, base_results_rev), completed_after_failures) + } + + True -> { + let combined_before_each = + list.append(inherited_before_each, hooks.before_each) + let combined_after_each = + list.append(hooks.after_each, inherited_after_each) + + let #(group_results_rev, completed_after_tests) = + run_tests_in_group( + config, + group_scope, + combined_tags, + ctx2, + combined_before_each, + combined_after_each, + tests, + failures_rev, + progress_reporter, + write, + total, + completed, + ) + + let #(after_group_rev, completed_after_groups) = + run_child_groups_sequentially( + config, + group_scope, + combined_tags, + ctx2, + combined_before_each, + combined_after_each, + groups, + progress_reporter, + write, + total, + completed_after_tests, + group_results_rev, + ) + + let final_rev = + run_after_all_chain( + config, + group_scope, + ctx2, + hooks.after_all, + after_group_rev, + ) + + #(list.append(final_rev, base_results_rev), completed_after_groups) + } + } } + + _ -> #(base_results_rev, completed) } } -/// Start the next pending test as a worker. 
-fn start_next_worker(state: ExecutionState) -> ExecutionState { - case state.pending { - [] -> state - [indexed_test, ..remaining_pending] -> { - let running_test = - spawn_test_worker(state.results_subject, indexed_test, state.config) +fn fail_tests_due_to_before_all( + scope: List(String), + inherited_tags: List(String), + tests: List(Node(context)), + groups: List(Node(context)), + failures_rev: List(AssertionFailure), + acc_rev: List(TestResult), +) -> List(TestResult) { + let after_tests = + fail_test_nodes(scope, inherited_tags, tests, failures_rev, acc_rev) + fail_group_nodes(scope, inherited_tags, groups, failures_rev, after_tests) +} - ExecutionState(..state, pending: remaining_pending, running: [ - running_test, - ..state.running +fn fail_test_nodes( + scope: List(String), + inherited_tags: List(String), + nodes: List(Node(context)), + failures_rev: List(AssertionFailure), + acc_rev: List(TestResult), +) -> List(TestResult) { + case nodes { + [] -> acc_rev + [Test(name, tags, kind, _run, _timeout_ms), ..rest] -> { + let full_name = list.append(scope, [name]) + let all_tags = list.append(inherited_tags, tags) + let failures = list.reverse(failures_rev) + let result = + TestResult( + name: name, + full_name: full_name, + status: Failed, + duration_ms: 0, + tags: all_tags, + failures: failures, + kind: kind, + ) + fail_test_nodes(scope, inherited_tags, rest, failures_rev, [ + result, + ..acc_rev ]) } + [_other, ..rest] -> + fail_test_nodes(scope, inherited_tags, rest, failures_rev, acc_rev) } } -/// Spawn a worker process for a test. 
-fn spawn_test_worker( - results_subject: Subject(WorkerResult), - indexed_test: IndexedTest, - _config: ParallelConfig, -) -> RunningTest { - let test_index = indexed_test.index - let test_case = indexed_test.test_case - - let worker_pid = - spawn_unlinked(fn() { - let start_time = timing.now_ms() - let test_run_result = run_test_directly(test_case) - let duration_ms = timing.now_ms() - start_time - send( - results_subject, - WorkerCompleted(test_index, test_run_result, duration_ms), - ) - }) - - let worker_monitor = monitor(worker_pid) +fn fail_group_nodes( + scope: List(String), + inherited_tags: List(String), + nodes: List(Node(context)), + failures_rev: List(AssertionFailure), + acc_rev: List(TestResult), +) -> List(TestResult) { + case nodes { + [] -> acc_rev + [Group(name, tags, children), ..rest] -> { + let group_scope = list.append(scope, [name]) + let combined_tags = list.append(inherited_tags, tags) + let empty_hooks = + GroupHooks( + before_all: [], + before_each: [], + after_each: [], + after_all: [], + ) + let #(_hooks, tests, groups) = + collect_children(children, empty_hooks, [], []) + let next = + fail_tests_due_to_before_all( + group_scope, + combined_tags, + tests, + groups, + failures_rev, + acc_rev, + ) + fail_group_nodes(scope, inherited_tags, rest, failures_rev, next) + } + [_other, ..rest] -> + fail_group_nodes(scope, inherited_tags, rest, failures_rev, acc_rev) + } +} - RunningTest( - index: test_index, - test_case: test_case, - worker_pid: worker_pid, - worker_monitor: worker_monitor, +type GroupHooks(context) { + GroupHooks( + before_all: List(fn(context) -> Result(context, String)), + before_each: List(fn(context) -> Result(context, String)), + after_each: List(fn(context) -> Result(Nil, String)), + after_all: List(fn(context) -> Result(Nil, String)), ) } -/// Result of running a test with hooks. -/// Tracks whether the failure came from setup, test, or teardown. 
-type TestRunResult { - /// Test passed (all hooks and test body passed) - TestPassed - /// Test was skipped - TestSkipped - /// A before_each hook failed (test was not run) - SetupFailure(failure: types.AssertionFailure) - /// The test body failed - TestFailure(failure: types.AssertionFailure) - /// An after_each hook failed (test may have passed) - TeardownFailure(failure: types.AssertionFailure) -} +fn collect_children( + children: List(Node(context)), + hooks: GroupHooks(context), + tests_rev: List(Node(context)), + groups_rev: List(Node(context)), +) -> #(GroupHooks(context), List(Node(context)), List(Node(context))) { + let hooks0 = case hooks { + GroupHooks(..) -> hooks + } -/// Run a test case directly in the current process. -/// -/// Executes lifecycle hooks in the correct order: -/// 1. Run before_each hooks (outer to inner) -/// 2. If all hooks pass, run the test -/// 3. Run after_each hooks (inner to outer), even if test failed -fn run_test_directly(test_case: TestCase) -> TestRunResult { - case test_case { - TestCase(config) -> run_with_hooks(config) + case children { + [] -> #(hooks0, list.reverse(tests_rev), list.reverse(groups_rev)) + [child, ..rest] -> + case child { + BeforeAll(run) -> + collect_children( + rest, + GroupHooks( + ..hooks0, + before_all: list.append(hooks0.before_all, [run]), + ), + tests_rev, + groups_rev, + ) + BeforeEach(run) -> + collect_children( + rest, + GroupHooks( + ..hooks0, + before_each: list.append(hooks0.before_each, [run]), + ), + tests_rev, + groups_rev, + ) + AfterEach(run) -> + collect_children( + rest, + GroupHooks( + ..hooks0, + after_each: list.append(hooks0.after_each, [run]), + ), + tests_rev, + groups_rev, + ) + AfterAll(run) -> + collect_children( + rest, + GroupHooks( + ..hooks0, + after_all: list.append(hooks0.after_all, [run]), + ), + tests_rev, + groups_rev, + ) + Test(..) -> + collect_children(rest, hooks0, [child, ..tests_rev], groups_rev) + Group(..) 
-> + collect_children(rest, hooks0, tests_rev, [child, ..groups_rev]) + } } } -/// Run a test with its before_each and after_each hooks. -fn run_with_hooks(config: SingleTestConfig) -> TestRunResult { - // Run before_each hooks - let before_result = run_hooks(config.before_each_hooks) +fn run_before_all_chain( + config: ParallelConfig, + scope: List(String), + context: context, + hooks: List(fn(context) -> Result(context, String)), + inherited_before_each: List(fn(context) -> Result(context, String)), + inherited_after_each: List(fn(context) -> Result(Nil, String)), + failures_rev: List(AssertionFailure), +) -> #( + context, + List(fn(context) -> Result(context, String)), + List(fn(context) -> Result(Nil, String)), + List(AssertionFailure), +) { + case hooks { + [] -> #( + context, + list.append(inherited_before_each, []), + list.append([], inherited_after_each), + failures_rev, + ) + [hook, ..rest] -> + case run_hook_transform(config, scope, context, hook) { + Ok(next) -> + run_before_all_chain( + config, + scope, + next, + rest, + inherited_before_each, + inherited_after_each, + failures_rev, + ) + Error(message) -> #( + context, + inherited_before_each, + inherited_after_each, + [hook_failure("before_all", message), ..failures_rev], + ) + } + } +} - case before_result { - AssertionFailed(failure) -> SetupFailure(failure) - // Hooks returning AssertionSkipped are treated as passing - AssertionOk | AssertionSkipped -> run_test_and_after(config) +fn run_tests_in_group( + config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + tests: List(Node(context)), + failures_rev: List(AssertionFailure), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, +) -> #(List(TestResult), Int) { + let ParallelConfig(max_concurrency: max_concurrency, 
default_timeout_ms: _) = + config + case max_concurrency <= 1 { + True -> + run_tests_sequentially( + config, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + tests, + failures_rev, + progress_reporter, + write, + total, + completed, + [], + ) + False -> + run_tests_parallel( + config, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + tests, + failures_rev, + progress_reporter, + write, + total, + completed, + ) } } -fn run_test_and_after(config: SingleTestConfig) -> TestRunResult { - // Run the test - let test_result = config.run() +fn run_tests_parallel( + config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + tests: List(Node(context)), + failures_rev: List(AssertionFailure), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, +) -> #(List(TestResult), Int) { + let subject = new_subject() + let indexed = index_tests(tests, 0, []) + let ParallelConfig(max_concurrency: max_concurrency, default_timeout_ms: _) = + config + + run_parallel_loop(RunParallelLoopConfig( + config: config, + subject: subject, + scope: scope, + inherited_tags: inherited_tags, + context: context, + before_each_hooks: before_each_hooks, + after_each_hooks: after_each_hooks, + failures_rev: failures_rev, + pending: indexed, + running: [], + pending_emit: [], + emitted_rev: [], + next_emit_index: 0, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + max_concurrency: max_concurrency, + )) +} - // Always run after_each hooks - let after_result = run_hooks(config.after_each_hooks) +fn index_tests( + nodes: List(Node(context)), + next_index: Int, + acc_rev: List(#(Int, Node(context))), +) -> List(#(Int, Node(context))) { + case nodes { + [] -> 
list.reverse(acc_rev) + [Test(..) as t, ..rest] -> + index_tests(rest, next_index + 1, [#(next_index, t), ..acc_rev]) + [_other, ..rest] -> index_tests(rest, next_index, acc_rev) + } +} - // Determine final result - case test_result, after_result { - AssertionSkipped, _ -> TestSkipped - AssertionFailed(failure), _ -> TestFailure(failure) - AssertionOk, AssertionFailed(failure) -> TeardownFailure(failure) - // Hooks returning AssertionSkipped are treated as passing - AssertionOk, AssertionOk -> TestPassed - AssertionOk, AssertionSkipped -> TestPassed +fn run_parallel_loop( + loop: RunParallelLoopConfig(context), +) -> #(List(TestResult), Int) { + let RunParallelLoopConfig( + config: config, + subject: subject, + scope: scope, + inherited_tags: inherited_tags, + context: context, + before_each_hooks: before_each_hooks, + after_each_hooks: after_each_hooks, + failures_rev: failures_rev, + pending: pending, + running: running, + pending_emit: pending_emit, + emitted_rev: emitted_rev, + next_emit_index: next_emit_index, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + max_concurrency: max_concurrency, + ) = loop + + let #(pending2, running2) = + start_workers_up_to_limit(StartWorkersUpToLimitConfig( + config: config, + subject: subject, + scope: scope, + inherited_tags: inherited_tags, + context: context, + before_each_hooks: before_each_hooks, + after_each_hooks: after_each_hooks, + failures_rev: failures_rev, + pending: pending, + running: running, + max_concurrency: max_concurrency, + )) + + case list.is_empty(pending2) && list.is_empty(running2) { + True -> #( + list.append( + list.reverse(emit_all_results(pending_emit, next_emit_index, [])), + emitted_rev, + ), + completed, + ) + False -> { + let #(pending3, running3, pending_emit2, completed2) = + wait_for_event_or_timeout(WaitForEventOrTimeoutConfig( + config: config, + subject: subject, + scope: scope, + pending: pending2, + running: running2, + pending_emit: 
pending_emit, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + )) + + let #(maybe_ready, remaining_emit) = + take_indexed_result(next_emit_index, pending_emit2, []) + + case maybe_ready { + None -> + run_parallel_loop( + RunParallelLoopConfig( + ..loop, + pending: pending3, + running: running3, + pending_emit: remaining_emit, + completed: completed2, + ), + ) + Some(IndexedResult(_, result)) -> + run_parallel_loop( + RunParallelLoopConfig( + ..loop, + pending: pending3, + running: running3, + pending_emit: remaining_emit, + emitted_rev: [result, ..emitted_rev], + next_emit_index: next_emit_index + 1, + completed: completed2, + ), + ) + } + } } } -/// Run a list of hooks sequentially, stopping on first failure. -fn run_hooks(hooks: List(fn() -> AssertionResult)) -> AssertionResult { - run_hooks_from_list(hooks) +fn emit_all_results( + pending_emit: List(IndexedResult), + next_index: Int, + acc_rev: List(TestResult), +) -> List(TestResult) { + let #(maybe_ready, remaining) = + take_indexed_result(next_index, pending_emit, []) + case maybe_ready { + None -> list.reverse(acc_rev) + Some(IndexedResult(_, result)) -> + emit_all_results(remaining, next_index + 1, [result, ..acc_rev]) + } } -fn run_hooks_from_list( - remaining: List(fn() -> AssertionResult), -) -> AssertionResult { - case remaining { - [] -> AssertionOk - [hook, ..rest] -> { - let result = hook() - case result { - // Hooks returning AssertionSkipped are treated as passing - AssertionOk | AssertionSkipped -> run_hooks_from_list(rest) - AssertionFailed(_) -> result +fn start_workers_up_to_limit( + input: StartWorkersUpToLimitConfig(context), +) -> #(List(#(Int, Node(context))), List(RunningTest)) { + let StartWorkersUpToLimitConfig( + config: config, + subject: subject, + scope: scope, + inherited_tags: inherited_tags, + context: context, + before_each_hooks: before_each_hooks, + after_each_hooks: after_each_hooks, + failures_rev: failures_rev, + pending: 
pending, + running: running, + max_concurrency: max_concurrency, + ) = input + + let slots = max_concurrency - list.length(running) + case slots > 0 && !list.is_empty(pending) { + False -> #(pending, running) + True -> + case pending { + [] -> #(pending, running) + [#(index, test_node), ..rest] -> { + let #(pid, deadline_ms) = + spawn_test_worker( + config, + subject, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + failures_rev, + index, + test_node, + ) + let #(name, full_name, tags, kind) = + running_test_metadata(scope, inherited_tags, test_node) + start_workers_up_to_limit( + StartWorkersUpToLimitConfig(..input, pending: rest, running: [ + RunningTest( + index: index, + pid: pid, + deadline_ms: deadline_ms, + name: name, + full_name: full_name, + tags: tags, + kind: kind, + ), + ..running + ]), + ) + } } - } } } -/// Wait for a result from any running worker. -fn wait_for_result(state: ExecutionState) -> ExecutionState { - let selector = build_results_selector(state) +fn running_test_metadata( + scope: List(String), + inherited_tags: List(String), + node: Node(context), +) -> #(String, List(String), List(String), TestKind) { + case node { + Test(name, tags, kind, _run, _timeout_ms) -> #( + name, + list.append(scope, [name]), + list.append(inherited_tags, tags), + kind, + ) + _ -> #("", list.append(scope, [""]), inherited_tags, Unit) + } +} + +fn spawn_test_worker( + config: ParallelConfig, + subject: Subject(WorkerMessage), + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + failures_rev: List(AssertionFailure), + index: Int, + node: Node(context), +) -> #(Pid, Int) { + let start = timing.now_ms() + let timeout_ms = test_timeout_ms(config, node) + let pid = + spawn_unlinked(fn() { + case + run_catching(fn() { + execute_one_test_node( + config, + scope, + inherited_tags, + 
context, + before_each_hooks, + after_each_hooks, + failures_rev, + node, + ) + }) + { + Ok(result) -> + send(subject, WorkerCompleted(index: index, result: result)) + Error(reason) -> + send(subject, WorkerCrashed(index: index, reason: reason)) + } + }) + let deadline_ms = start + timeout_ms + #(pid, deadline_ms) +} - // Wait indefinitely - workers have their own timeouts - case selector_receive(selector, 60_000) { - Ok(worker_result) -> handle_worker_result(state, worker_result) - Error(Nil) -> handle_selector_timeout(state) +fn test_timeout_ms(config: ParallelConfig, node: Node(context)) -> Int { + let ParallelConfig(default_timeout_ms: default_timeout_ms, max_concurrency: _) = + config + case node { + Test(_, _, _, _, Some(ms)) -> ms + _ -> default_timeout_ms } } -/// Build a selector for worker results and monitor events. -fn build_results_selector(state: ExecutionState) -> Selector(WorkerResult) { - new_selector() - |> select(state.results_subject) - |> add_monitor_handlers(state.running) +fn wait_for_event_or_timeout( + input: WaitForEventOrTimeoutConfig(context), +) -> #(List(#(Int, Node(context))), List(RunningTest), List(IndexedResult), Int) { + let WaitForEventOrTimeoutConfig( + config: config, + subject: subject, + scope: scope, + pending: pending, + running: running, + pending_emit: pending_emit, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + ) = input + + let selector = new_selector() |> select(subject) + + let now = timing.now_ms() + let next_timeout = next_deadline_timeout(running, now, 1000) + case selector_receive(selector, next_timeout) { + Ok(message) -> + handle_worker_message( + scope, + message, + pending, + running, + pending_emit, + progress_reporter, + write, + total, + completed, + ) + Error(Nil) -> + handle_timeouts( + config, + pending, + running, + pending_emit, + progress_reporter, + write, + total, + completed, + ) + } } -/// Add monitor handlers for all running workers. 
-fn add_monitor_handlers( - selector: Selector(WorkerResult), +fn handle_worker_message( + scope: List(String), + message: WorkerMessage, + pending: List(#(Int, Node(context))), running: List(RunningTest), -) -> Selector(WorkerResult) { - case running { - [] -> selector - [running_test, ..rest] -> { - let updated_selector = - process.select_specific_monitor( - selector, - running_test.worker_monitor, - fn(down) { WorkerDown(running_test.index, format_down_reason(down)) }, + pending_emit: List(IndexedResult), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, +) -> #(List(#(Int, Node(context))), List(RunningTest), List(IndexedResult), Int) { + case message { + WorkerCompleted(index, result) -> { + let next_completed = + emit_test_finished_progress( + progress_reporter, + write, + completed, + total, + result, ) - add_monitor_handlers(updated_selector, rest) + #( + pending, + remove_running_by_index(running, index), + [IndexedResult(index: index, result: result), ..pending_emit], + next_completed, + ) + } + WorkerCrashed(index, reason) -> { + let failure = + hook_failure( + "crash", + "worker crashed in " <> string.join(scope, " > ") <> ": " <> reason, + ) + let #(name, full_name, tags, kind) = case + get_running_by_index(running, index) + { + Some(r) -> #(r.name, r.full_name, r.tags, r.kind) + None -> #("", list.append(scope, [""]), [], Unit) + } + let result = + TestResult( + name: name, + full_name: full_name, + status: Failed, + duration_ms: 0, + tags: tags, + failures: [failure], + kind: kind, + ) + let next_completed = + emit_test_finished_progress( + progress_reporter, + write, + completed, + total, + result, + ) + #( + pending, + remove_running_by_index(running, index), + [IndexedResult(index: index, result: result), ..pending_emit], + next_completed, + ) } } } -/// Format a down message reason as a string. 
-fn format_down_reason(down: process.Down) -> String { - case down.reason { - process.Normal -> "normal" - process.Killed -> "killed" - process.Abnormal(reason) -> string.inspect(reason) +fn get_running_by_index( + running: List(RunningTest), + index: Int, +) -> option.Option(RunningTest) { + case running { + [] -> None + [r, ..rest] -> + case r.index == index { + True -> Some(r) + False -> get_running_by_index(rest, index) + } } } -/// Handle a result from a worker. -fn handle_worker_result( - state: ExecutionState, - worker_result: WorkerResult, -) -> ExecutionState { - let index = get_worker_result_index(worker_result) - let running_test = find_running_test(state.running, index) +fn handle_timeouts( + _config: ParallelConfig, + pending: List(#(Int, Node(context))), + running: List(RunningTest), + pending_emit: List(IndexedResult), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, +) -> #(List(#(Int, Node(context))), List(RunningTest), List(IndexedResult), Int) { + let now = timing.now_ms() + let #(timed_out, still_running) = partition_timeouts(running, now, [], []) + list.each(timed_out, fn(r) { kill(r.pid) }) + let timeout_results = list.map(timed_out, fn(r) { timeout_result(r) }) + let next_completed = + emit_test_finished_progress_list( + progress_reporter, + write, + completed, + total, + timeout_results, + ) + #( + pending, + still_running, + list.append(timeout_results, pending_emit), + next_completed, + ) +} + +fn timeout_result(running: RunningTest) -> IndexedResult { + let failure = hook_failure("timeout", "test timed out") + let RunningTest( + index: index, + pid: _pid, + deadline_ms: _deadline_ms, + name: name, + full_name: full_name, + tags: tags, + kind: kind, + ) = running + let result = + TestResult( + name: name, + full_name: full_name, + status: TimedOut, + duration_ms: 0, + tags: tags, + failures: [failure], + kind: kind, + ) + IndexedResult(index: index, result: result) +} + +fn 
emit_test_finished_progress( + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + completed: Int, + total: Int, + result: TestResult, +) -> Int { + let next_completed = completed + 1 + case progress_reporter { + None -> next_completed + Some(reporter) -> { + case + progress.handle_event( + reporter, + reporter_types.TestFinished( + completed: next_completed, + total: total, + result: result, + ), + ) + { + None -> Nil + Some(text) -> write(text) + } + next_completed + } + } +} - case running_test { - None -> state - // Spurious message, ignore - Some(running) -> { - let test_result = convert_worker_result(running, worker_result) - let indexed_result = IndexedResult(index: index, result: test_result) +fn emit_test_finished_progress_list( + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + completed: Int, + total: Int, + results: List(IndexedResult), +) -> Int { + case results { + [] -> completed + [IndexedResult(index: _index, result: result), ..rest] -> { + let next_completed = + emit_test_finished_progress( + progress_reporter, + write, + completed, + total, + result, + ) + emit_test_finished_progress_list( + progress_reporter, + write, + next_completed, + total, + rest, + ) + } + } +} - ExecutionState( - ..state, - running: remove_running_test(state.running, index), - completed: [indexed_result, ..state.completed], +fn emit_test_finished_progress_results( + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + completed: Int, + total: Int, + results: List(TestResult), +) -> Int { + case results { + [] -> completed + [result, ..rest] -> { + let next_completed = + emit_test_finished_progress( + progress_reporter, + write, + completed, + total, + result, + ) + emit_test_finished_progress_results( + progress_reporter, + write, + next_completed, + total, + rest, ) } } } -/// Extract the index from a worker result. 
-fn get_worker_result_index(result: WorkerResult) -> Int { - case result { - WorkerCompleted(index, _, _) -> index - WorkerDown(index, _) -> index +fn partition_timeouts( + running: List(RunningTest), + now: Int, + timed_out_rev: List(RunningTest), + still_rev: List(RunningTest), +) -> #(List(RunningTest), List(RunningTest)) { + case running { + [] -> #(list.reverse(timed_out_rev), list.reverse(still_rev)) + [r, ..rest] -> + case r.deadline_ms <= now { + True -> partition_timeouts(rest, now, [r, ..timed_out_rev], still_rev) + False -> partition_timeouts(rest, now, timed_out_rev, [r, ..still_rev]) + } } } -/// Find a running test by index. -fn find_running_test( +fn next_deadline_timeout( running: List(RunningTest), - index: Int, -) -> Option(RunningTest) { + now: Int, + fallback: Int, +) -> Int { case running { - [] -> None - [running_test, ..rest] -> find_running_test_check(running_test, rest, index) + [] -> fallback + [r, ..rest] -> min_deadline_timeout(rest, now, r.deadline_ms - now) } } -fn find_running_test_check( - running_test: RunningTest, - rest: List(RunningTest), - index: Int, -) -> Option(RunningTest) { - case running_test.index == index { - True -> Some(running_test) - False -> find_running_test(rest, index) +fn min_deadline_timeout( + running: List(RunningTest), + now: Int, + current: Int, +) -> Int { + case running { + [] -> + case current < 0 { + True -> 0 + False -> current + } + [r, ..rest] -> { + let d = r.deadline_ms - now + let next = case d < current { + True -> d + False -> current + } + min_deadline_timeout(rest, now, next) + } } } -/// Remove a running test by index. -fn remove_running_test( +fn remove_running_by_index( running: List(RunningTest), index: Int, ) -> List(RunningTest) { - list.filter(running, fn(running_test) { running_test.index != index }) + list.filter(running, fn(r) { r.index != index }) } -/// Convert a worker result to a TestResult. 
-fn convert_worker_result( - running_test: RunningTest, - worker_result: WorkerResult, -) -> TestResult { - case running_test.test_case { - TestCase(config) -> convert_worker_result_with_config(config, worker_result) - } -} - -fn convert_worker_result_with_config( - config: SingleTestConfig, - worker_result: WorkerResult, -) -> TestResult { - case worker_result { - WorkerCompleted(_, test_run_result, duration_ms) -> - test_run_result_to_test_result(config, test_run_result, duration_ms) - WorkerDown(_, reason) -> make_crashed_result(config, reason, 0) +fn take_indexed_result( + index: Int, + items: List(IndexedResult), + acc_rev: List(IndexedResult), +) -> #(Option(IndexedResult), List(IndexedResult)) { + case items { + [] -> #(None, list.reverse(acc_rev)) + [item, ..rest] -> + case item.index == index { + True -> #(Some(item), list.append(list.reverse(acc_rev), rest)) + False -> take_indexed_result(index, rest, [item, ..acc_rev]) + } } } -/// Convert a TestRunResult to a TestResult. -fn test_run_result_to_test_result( - config: SingleTestConfig, - test_run_result: TestRunResult, - duration_ms: Int, +fn execute_one_test_node( + config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + failures_rev: List(AssertionFailure), + node: Node(context), ) -> TestResult { - case test_run_result { - TestPassed -> make_passed_result(config, duration_ms) - TestSkipped -> make_skipped_result(config, duration_ms) - SetupFailure(failure) -> - make_setup_failed_result(config, failure, duration_ms) - TestFailure(failure) -> make_failed_result(config, failure, duration_ms) - TeardownFailure(failure) -> make_failed_result(config, failure, duration_ms) - } -} - -fn make_passed_result(config: SingleTestConfig, duration_ms: Int) -> TestResult { - TestResult( - name: config.name, - full_name: config.full_name, - status: 
types.Passed, - duration_ms: duration_ms, - tags: config.tags, - failures: [], - kind: config.kind, - ) -} + // Fallback to sequential single-test logic by reusing existing code path. + case node { + Test(name, tags, kind, run, timeout_ms) -> { + let full_name = list.append(scope, [name]) + let all_tags = list.append(inherited_tags, tags) + let start = timing.now_ms() + + let #(ctx_after_setup, setup_status, setup_failures) = + run_before_each_list(config, scope, context, before_each_hooks, []) + + let assertion = case setup_status { + SetupFailed -> AssertionFailed(head_failure_or_unknown(setup_failures)) + _ -> + run_in_sandbox(config, timeout_ms, fn() { + case run(ctx_after_setup) { + Ok(a) -> a + Error(message) -> AssertionFailed(hook_failure("error", message)) + } + }) + } -fn make_skipped_result(config: SingleTestConfig, duration_ms: Int) -> TestResult { - TestResult( - name: config.name, - full_name: config.full_name, - status: Skipped, - duration_ms: duration_ms, - tags: config.tags, - failures: [], - kind: config.kind, - ) -} + let #(status, failures) = + assertion_to_status_and_failures( + assertion, + failures_rev, + setup_failures, + ) -fn make_setup_failed_result( - config: SingleTestConfig, - failure: types.AssertionFailure, - duration_ms: Int, -) -> TestResult { - TestResult( - name: config.name, - full_name: config.full_name, - status: SetupFailed, - duration_ms: duration_ms, - tags: config.tags, - failures: [failure], - kind: config.kind, - ) -} + let #(final_status, final_failures) = + run_after_each_list( + config, + scope, + ctx_after_setup, + after_each_hooks, + status, + failures, + ) -fn make_failed_result( - config: SingleTestConfig, - failure: types.AssertionFailure, - duration_ms: Int, -) -> TestResult { - TestResult( - name: config.name, - full_name: config.full_name, - status: Failed, - duration_ms: duration_ms, - tags: config.tags, - failures: [failure], - kind: config.kind, - ) -} + let duration = timing.now_ms() - start -/// Create a 
TestResult for a timed-out test. -fn make_timeout_result(config: SingleTestConfig, duration_ms: Int) -> TestResult { - TestResult( - name: config.name, - full_name: config.full_name, - status: TimedOut, - duration_ms: duration_ms, - tags: config.tags, - failures: [], - kind: config.kind, - ) + TestResult( + name: name, + full_name: full_name, + status: final_status, + duration_ms: duration, + tags: all_tags, + failures: list.reverse(final_failures), + kind: kind, + ) + } + _ -> + TestResult( + name: "", + full_name: list.append(scope, [""]), + status: Failed, + duration_ms: 0, + tags: [], + failures: [hook_failure("internal", "non-test node")], + kind: Unit, + ) + } } -/// Create a TestResult for a crashed test. -fn make_crashed_result( - config: SingleTestConfig, - reason: String, - duration_ms: Int, -) -> TestResult { - let failure = - types.AssertionFailure( - operator: "crash", - message: "Test process crashed: " <> reason, - payload: None, - ) +fn run_tests_sequentially( + config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + tests: List(Node(context)), + failures_rev: List(AssertionFailure), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + acc_rev: List(TestResult), +) -> #(List(TestResult), Int) { + case tests { + [] -> #(acc_rev, completed) + [Test(name, tags, kind, run, timeout_ms), ..rest] -> { + let full_name = list.append(scope, [name]) + let all_tags = list.append(inherited_tags, tags) + let start = timing.now_ms() + + let #(ctx_after_setup, setup_status, setup_failures) = + run_before_each_list(config, scope, context, before_each_hooks, []) + + let assertion = case setup_status { + SetupFailed -> AssertionFailed(head_failure_or_unknown(setup_failures)) + _ -> + run_in_sandbox(config, timeout_ms, fn() { + case 
run(ctx_after_setup) { + Ok(a) -> a + Error(message) -> AssertionFailed(hook_failure("error", message)) + } + }) + } - TestResult( - name: config.name, - full_name: config.full_name, - status: Failed, - duration_ms: duration_ms, - tags: config.tags, - failures: [failure], - kind: config.kind, - ) -} + let #(status, failures) = + assertion_to_status_and_failures( + assertion, + failures_rev, + setup_failures, + ) -/// Handle selector timeout (should not happen in normal operation). -fn handle_selector_timeout(state: ExecutionState) -> ExecutionState { - // Kill all running workers and mark them as timed out - kill_all_running(state.running) + let #(final_status, final_failures) = + run_after_each_list( + config, + scope, + ctx_after_setup, + after_each_hooks, + status, + failures, + ) - let timeout_results = - list.map(state.running, fn(running_test) { - case running_test.test_case { - TestCase(config) -> { - // Use the configured timeout as the duration for timed-out tests - IndexedResult( - index: running_test.index, - result: make_timeout_result(config, state.config.default_timeout_ms), - ) - } - } - }) + let duration = timing.now_ms() - start + + let result = + TestResult( + name: name, + full_name: full_name, + status: final_status, + duration_ms: duration, + tags: all_tags, + failures: list.reverse(final_failures), + kind: kind, + ) - ExecutionState( - ..state, - running: [], - completed: list.append(timeout_results, state.completed), - ) -} + let next_completed = + emit_test_finished_progress( + progress_reporter, + write, + completed, + total, + result, + ) -/// Kill all running worker processes. 
-fn kill_all_running(running: List(RunningTest)) -> Nil { - case running { - [] -> Nil - [running_test, ..rest] -> { - kill(running_test.worker_pid) - kill_all_running(rest) + run_tests_sequentially( + config, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + rest, + failures_rev, + progress_reporter, + write, + total, + next_completed, + [result, ..acc_rev], + ) } - } -} -/// Sort completed results by their original index. -fn sort_results(completed: List(IndexedResult)) -> List(TestResult) { - completed - |> list.sort(fn(a, b) { compare_indices(a.index, b.index) }) - |> list.map(fn(indexed) { indexed.result }) + [_other, ..rest] -> + run_tests_sequentially( + config, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + rest, + failures_rev, + progress_reporter, + write, + total, + completed, + acc_rev, + ) + } } -/// Compare two indices for sorting. -fn compare_indices(a: Int, b: Int) -> order.Order { - case a < b { - True -> order.Lt - False -> compare_indices_not_less(a, b) +fn run_child_groups_sequentially( + config: ParallelConfig, + scope: List(String), + inherited_tags: List(String), + context: context, + before_each_hooks: List(fn(context) -> Result(context, String)), + after_each_hooks: List(fn(context) -> Result(Nil, String)), + groups: List(Node(context)), + progress_reporter: Option(progress.ProgressReporter), + write: fn(String) -> Nil, + total: Int, + completed: Int, + acc_rev: List(TestResult), +) -> #(List(TestResult), Int) { + case groups { + [] -> #(acc_rev, completed) + [group_node, ..rest] -> { + let #(next_rev, next_completed) = + execute_node(ExecuteNodeConfig( + config: config, + scope: scope, + inherited_tags: inherited_tags, + context: context, + inherited_before_each: before_each_hooks, + inherited_after_each: after_each_hooks, + node: group_node, + progress_reporter: progress_reporter, + write: write, + total: total, + completed: completed, + results_rev: acc_rev, + )) + 
run_child_groups_sequentially( + config, + scope, + inherited_tags, + context, + before_each_hooks, + after_each_hooks, + rest, + progress_reporter, + write, + total, + next_completed, + next_rev, + ) + } } } -fn compare_indices_not_less(a: Int, b: Int) -> order.Order { - case a > b { - True -> order.Gt - False -> order.Eq +fn run_after_all_chain( + config: ParallelConfig, + scope: List(String), + context: context, + hooks: List(fn(context) -> Result(Nil, String)), + acc_rev: List(TestResult), +) -> List(TestResult) { + case hooks { + [] -> acc_rev + [hook, ..rest] -> + case run_hook_teardown(config, scope, context, hook) { + Ok(_) -> run_after_all_chain(config, scope, context, rest, acc_rev) + Error(message) -> { + let result = + TestResult( + name: "", + full_name: list.append(scope, [""]), + status: Failed, + duration_ms: 0, + tags: [], + failures: [hook_failure("after_all", message)], + kind: Unit, + ) + [result, ..acc_rev] + } + } } } // ============================================================================= -// Suite Execution (with before_all/after_all support) +// Hook helpers // ============================================================================= -/// Run a test suite with before_all/after_all semantics. -/// -/// Execution flow for each group: -/// 1. Run before_all hooks sequentially -/// 2. If any fail, mark all tests in group as SetupFailed -/// 3. Run tests in parallel (with their before_each/after_each) -/// 4. Wait for all tests to complete -/// 5. Run after_all hooks sequentially -/// 6. 
Recurse for nested groups -/// -pub fn run_suite_parallel( +fn run_hook_transform( config: ParallelConfig, - suite: TestSuite, -) -> List(TestResult) { - run_suite_group(config, suite) -} - -fn run_suite_group(config: ParallelConfig, suite: TestSuite) -> List(TestResult) { - // Run before_all hooks - let before_all_result = run_hooks(suite.before_all_hooks) - - case before_all_result { - AssertionFailed(failure) -> - mark_all_items_as_setup_failed(suite.items, failure) - // Hooks returning AssertionSkipped are treated as passing - AssertionOk | AssertionSkipped -> { - // Run all items (tests and nested groups) - let results = run_suite_items(config, suite.items) - - // Run after_all hooks (regardless of test results) - let _after_all_result = run_hooks(suite.after_all_hooks) + scope: List(String), + context: context, + hook: fn(context) -> Result(context, String), +) -> Result(context, String) { + let ParallelConfig(default_timeout_ms: default_timeout_ms, max_concurrency: _) = + config + let sandbox_config = + sandbox.SandboxConfig( + timeout_ms: default_timeout_ms, + show_crash_reports: False, + ) - // Return results (after_all failures don't change test results) - results - } + case sandbox.run_isolated(sandbox_config, fn() { hook(context) }) { + sandbox.SandboxCompleted(result) -> result + sandbox.SandboxTimedOut -> + Error("hook timed out in " <> string.join(scope, " > ")) + sandbox.SandboxCrashed(reason) -> + Error("hook crashed in " <> string.join(scope, " > ") <> ": " <> reason) } } -fn mark_all_items_as_setup_failed( - items: List(TestSuiteItem), - failure: types.AssertionFailure, -) -> List(TestResult) { - mark_items_failed_from_list(items, failure, []) -} +fn run_hook_teardown( + config: ParallelConfig, + scope: List(String), + context: context, + hook: fn(context) -> Result(Nil, String), +) -> Result(Nil, String) { + let ParallelConfig(default_timeout_ms: default_timeout_ms, max_concurrency: _) = + config + let sandbox_config = + sandbox.SandboxConfig( + 
timeout_ms: default_timeout_ms, + show_crash_reports: False, + ) -fn mark_items_failed_from_list( - remaining: List(TestSuiteItem), - failure: types.AssertionFailure, - accumulated: List(TestResult), -) -> List(TestResult) { - case remaining { - [] -> list.reverse(accumulated) - [item, ..rest] -> { - let results = mark_item_as_setup_failed(item, failure) - let updated = list.append(list.reverse(results), accumulated) - mark_items_failed_from_list(rest, failure, updated) - } + case sandbox.run_isolated(sandbox_config, fn() { hook(context) }) { + sandbox.SandboxCompleted(result) -> result + sandbox.SandboxTimedOut -> + Error("hook timed out in " <> string.join(scope, " > ")) + sandbox.SandboxCrashed(reason) -> + Error("hook crashed in " <> string.join(scope, " > ") <> ": " <> reason) } } -fn mark_item_as_setup_failed( - item: TestSuiteItem, - failure: types.AssertionFailure, -) -> List(TestResult) { - case item { - SuiteTest(test_case) -> { - let TestCase(config) = test_case - // Duration is 0 for setup failures since the test never ran - [make_setup_failed_result(config, failure, 0)] - } - SuiteGroup(nested_suite) -> - mark_all_items_as_setup_failed(nested_suite.items, failure) +fn run_before_each_list( + config: ParallelConfig, + scope: List(String), + context: context, + hooks: List(fn(context) -> Result(context, String)), + failures_rev: List(AssertionFailure), +) -> #(context, Status, List(AssertionFailure)) { + case hooks { + [] -> #(context, Passed, failures_rev) + [hook, ..rest] -> + case run_hook_transform(config, scope, context, hook) { + Ok(next) -> + run_before_each_list(config, scope, next, rest, failures_rev) + Error(message) -> #(context, SetupFailed, [ + hook_failure("before_each", message), + ..failures_rev + ]) + } } } -fn run_suite_items( +fn run_after_each_list( config: ParallelConfig, - items: List(TestSuiteItem), -) -> List(TestResult) { - // Separate tests from nested groups - let tests = collect_tests_from_items(items, []) - let groups = 
collect_groups_from_items(items, []) - - // Run tests in parallel - let test_results = run_parallel(config, tests) - - // Run nested groups (each group runs its before_all/after_all) - let group_results = run_groups_sequentially(config, groups, []) - - // Combine results (tests first, then groups, preserving order) - list.append(test_results, group_results) -} - -fn collect_tests_from_items( - remaining: List(TestSuiteItem), - accumulated: List(TestCase), -) -> List(TestCase) { - case remaining { - [] -> list.reverse(accumulated) - [item, ..rest] -> { - let updated = case item { - SuiteTest(test_case) -> [test_case, ..accumulated] - SuiteGroup(_) -> accumulated + scope: List(String), + context: context, + hooks: List(fn(context) -> Result(Nil, String)), + status: Status, + failures_rev: List(AssertionFailure), +) -> #(Status, List(AssertionFailure)) { + case hooks { + [] -> #(status, failures_rev) + [hook, ..rest] -> + case run_hook_teardown(config, scope, context, hook) { + Ok(_) -> + run_after_each_list( + config, + scope, + context, + rest, + status, + failures_rev, + ) + Error(message) -> + run_after_each_list(config, scope, context, rest, Failed, [ + hook_failure("after_each", message), + ..failures_rev + ]) } - collect_tests_from_items(rest, updated) - } } } -fn collect_groups_from_items( - remaining: List(TestSuiteItem), - accumulated: List(TestSuite), -) -> List(TestSuite) { - case remaining { - [] -> list.reverse(accumulated) - [item, ..rest] -> { - let updated = case item { - SuiteTest(_) -> accumulated - SuiteGroup(suite) -> [suite, ..accumulated] - } - collect_groups_from_items(rest, updated) - } +fn hook_failure(operator: String, message: String) -> AssertionFailure { + AssertionFailure(operator: operator, message: message, payload: None) +} + +fn head_failure_or_unknown( + failures_rev: List(AssertionFailure), +) -> AssertionFailure { + case failures_rev { + [f, ..] 
-> f + [] -> hook_failure("before_each", "setup failed") + } +} + +fn assertion_to_status_and_failures( + result: AssertionResult, + inherited_failures_rev: List(AssertionFailure), + setup_failures_rev: List(AssertionFailure), +) -> #(Status, List(AssertionFailure)) { + case result { + AssertionOk -> #( + Passed, + list.append(setup_failures_rev, inherited_failures_rev), + ) + AssertionFailed(failure) -> #(Failed, [ + failure, + ..list.append(setup_failures_rev, inherited_failures_rev) + ]) + AssertionSkipped -> #( + Skipped, + list.append(setup_failures_rev, inherited_failures_rev), + ) } } -fn run_groups_sequentially( +fn run_in_sandbox( config: ParallelConfig, - remaining: List(TestSuite), - accumulated: List(TestResult), -) -> List(TestResult) { - case remaining { - [] -> list.reverse(accumulated) - [suite, ..rest] -> { - let results = run_suite_group(config, suite) - let updated = list.append(list.reverse(results), accumulated) - run_groups_sequentially(config, rest, updated) - } + timeout_override: Option(Int), + test_function: fn() -> AssertionResult, +) -> AssertionResult { + let ParallelConfig(default_timeout_ms: default_timeout_ms, max_concurrency: _) = + config + let timeout = case timeout_override { + Some(ms) -> ms + None -> default_timeout_ms + } + let sandbox_config = + sandbox.SandboxConfig(timeout_ms: timeout, show_crash_reports: False) + case sandbox.run_isolated(sandbox_config, test_function) { + sandbox.SandboxCompleted(assertion) -> assertion + sandbox.SandboxTimedOut -> + AssertionFailed(hook_failure("timeout", "test timed out")) + sandbox.SandboxCrashed(reason) -> + AssertionFailed(hook_failure("crash", reason)) } } diff --git a/src/dream_test/process.gleam b/src/dream_test/process.gleam index c2fa226..29da5d4 100644 --- a/src/dream_test/process.gleam +++ b/src/dream_test/process.gleam @@ -1,47 +1,29 @@ //// Process helpers for tests that need actors or async operations. 
//// -//// When tests run in isolated BEAM processes, any processes spawned by the -//// test are automatically terminated when the test ends. This module provides -//// helpers for common patterns. +//// When Dream Test runs a test, it runs in an isolated BEAM process. Any +//// processes you spawn inside a test can be *linked* to that test process, so +//// they automatically die when the test ends (pass, fail, timeout, crash). //// -//// ## Auto-Cleanup +//// This module gives you a few “batteries included” patterns: //// -//// Processes started with these helpers are linked to the test process. -//// When the test completes (pass, fail, or timeout), all linked processes -//// are automatically cleaned up. No manual teardown needed. +//// - A simple counter actor (`start_counter`) you can use to test stateful code. +//// - A generic actor starter (`start_actor`) + a pipe-friendly call helper (`call_actor`). +//// - “wait until ready” polling (`await_ready` / `await_some`) for async systems. +//// - A safe-ish random port helper (`unique_port`) for test servers. //// -//// ## Quick Start +//// ## Example +//// Use this inside an `it` block. 
//// //// ```gleam -//// import dream_test/process.{start_counter, get_count, increment} -//// import dream_test/unit.{describe, it} -//// import dream_test/assertions/should.{should, equal, or_fail_with} +//// let counter = process.start_counter() +//// process.increment(counter) +//// process.increment(counter) //// -//// pub fn tests() { -//// describe("Counter", [ -//// it("increments correctly", fn() { -//// let counter = start_counter() -//// increment(counter) -//// increment(counter) -//// -//// get_count(counter) -//// |> should() -//// |> equal(2) -//// |> or_fail_with("Counter should be 2") -//// }), -//// ]) -//// } +//// process.get_count(counter) +//// |> should +//// |> be_equal(2) +//// |> or_fail_with("expected counter to be 2") //// ``` -//// -//// ## Available Helpers -//// -//// | Helper | Purpose | -//// |---------------------|----------------------------------------------| -//// | `start_counter` | Simple counter actor for testing state | -//// | `start_actor` | Generic actor with custom state and handler | -//// | `unique_port` | Generate random port for test servers | -//// | `await_ready` | Poll until a condition is true | -//// | `await_some` | Poll until a function returns Ok | import gleam/erlang/process.{type Subject} import gleam/int @@ -49,12 +31,22 @@ import gleam/otp/actor /// Messages for the built-in counter actor. /// -/// Use these with the counter functions or send them directly: +/// Most of the time you’ll use the helpers (`increment`, `set_count`, etc), but +/// you can also send the messages directly when it’s convenient. +/// +/// ## Example +/// Use this inside an `it` block. 
/// /// ```gleam -/// let counter = start_counter() -/// process.send(counter, Increment) -/// process.send(counter, SetCount(100)) +/// let counter = process.start_counter() +/// +/// erlang_process.send(counter, process.Increment) +/// erlang_process.send(counter, process.SetCount(10)) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(10) +/// |> or_fail_with("expected counter to be 10 after SetCount") /// ``` /// pub type CounterMessage { @@ -66,16 +58,26 @@ pub type CounterMessage { /// Start a counter actor initialized to 0. /// -/// The counter is linked to the test process and will be automatically -/// cleaned up when the test ends. +/// The counter is linked to the test process and will be automatically cleaned +/// up when the test ends. +/// +/// ## Returns +/// +/// A `Subject(CounterMessage)` you can pass to the other counter helpers (or +/// send messages to directly). /// /// ## Example +/// Use this inside an `it` block. /// /// ```gleam -/// let counter = start_counter() -/// increment(counter) -/// increment(counter) -/// get_count(counter) // -> 2 +/// let counter = process.start_counter() +/// process.increment(counter) +/// process.increment(counter) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(2) +/// |> or_fail_with("expected counter to be 2") /// ``` /// pub fn start_counter() -> Subject(CounterMessage) { @@ -84,15 +86,28 @@ pub fn start_counter() -> Subject(CounterMessage) { /// Start a counter actor with a specific initial value. /// +/// ## Parameters +/// +/// - `initial`: The initial count value. +/// +/// ## Returns +/// +/// A `Subject(CounterMessage)` for the started counter. +/// /// ## Example +/// Use this inside an `it` block. 
/// /// ```gleam -/// let counter = start_counter_with(100) -/// decrement(counter) -/// get_count(counter) // -> 99 +/// let counter = process.start_counter_with(10) +/// process.decrement(counter) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(9) +/// |> or_fail_with("expected counter to be 9 after decrement") /// ``` /// -pub fn start_counter_with(initial: Int) -> Subject(CounterMessage) { +pub fn start_counter_with(initial initial: Int) -> Subject(CounterMessage) { let assert Ok(started) = actor.new(initial) |> actor.on_message(handle_counter_message) @@ -120,15 +135,29 @@ fn handle_counter_message( /// /// This is a synchronous call that blocks until the counter responds. /// +/// ## Parameters +/// +/// - `counter`: The counter actor to query. +/// +/// ## Returns +/// +/// The current counter value. +/// /// ## Example /// +/// Use this inside an `it` block. +/// /// ```gleam -/// let counter = start_counter() -/// increment(counter) -/// let value = get_count(counter) // -> 1 +/// let counter = process.start_counter() +/// process.increment(counter) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(1) +/// |> or_fail_with("expected counter to be 1") /// ``` /// -pub fn get_count(counter: Subject(CounterMessage)) -> Int { +pub fn get_count(counter counter: Subject(CounterMessage)) -> Int { actor.call(counter, waiting: 1000, sending: GetCount) } @@ -136,16 +165,30 @@ pub fn get_count(counter: Subject(CounterMessage)) -> Int { /// /// This is an asynchronous send—it returns immediately. /// +/// ## Parameters +/// +/// - `counter`: The counter actor to increment. +/// +/// ## Returns +/// +/// `Nil`. (The message is sent asynchronously.) +/// /// ## Example /// +/// Use this inside an `it` block. 
+/// /// ```gleam -/// let counter = start_counter() -/// increment(counter) -/// increment(counter) -/// get_count(counter) // -> 2 +/// let counter = process.start_counter() +/// process.increment(counter) +/// process.increment(counter) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(2) +/// |> or_fail_with("expected counter to be 2") /// ``` /// -pub fn increment(counter: Subject(CounterMessage)) -> Nil { +pub fn increment(counter counter: Subject(CounterMessage)) -> Nil { process.send(counter, Increment) } @@ -153,15 +196,29 @@ pub fn increment(counter: Subject(CounterMessage)) -> Nil { /// /// This is an asynchronous send—it returns immediately. /// +/// ## Parameters +/// +/// - `counter`: The counter actor to decrement. +/// +/// ## Returns +/// +/// `Nil`. (The message is sent asynchronously.) +/// /// ## Example /// +/// Use this inside an `it` block. +/// /// ```gleam -/// let counter = start_counter_with(10) -/// decrement(counter) -/// get_count(counter) // -> 9 +/// let counter = process.start_counter_with(10) +/// process.decrement(counter) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(9) +/// |> or_fail_with("expected counter to be 9 after decrement") /// ``` /// -pub fn decrement(counter: Subject(CounterMessage)) -> Nil { +pub fn decrement(counter counter: Subject(CounterMessage)) -> Nil { process.send(counter, Decrement) } @@ -169,15 +226,33 @@ pub fn decrement(counter: Subject(CounterMessage)) -> Nil { /// /// This is an asynchronous send—it returns immediately. /// +/// ## Parameters +/// +/// - `counter`: The counter actor to set. +/// - `value`: The new value to set. +/// +/// ## Returns +/// +/// `Nil`. (The message is sent asynchronously.) +/// /// ## Example /// +/// Use this inside an `it` block. 
+/// /// ```gleam -/// let counter = start_counter() -/// set_count(counter, 42) -/// get_count(counter) // -> 42 +/// let counter = process.start_counter() +/// process.set_count(counter, 42) +/// +/// process.get_count(counter) +/// |> should +/// |> be_equal(42) +/// |> or_fail_with("expected counter to be 42 after set_count") /// ``` /// -pub fn set_count(counter: Subject(CounterMessage), value: Int) -> Nil { +pub fn set_count( + counter counter: Subject(CounterMessage), + value value: Int, +) -> Nil { process.send(counter, SetCount(value)) } @@ -188,6 +263,17 @@ pub fn set_count(counter: Subject(CounterMessage), value: Int) -> Nil { /// - `Port(n)` - Use a specific port number /// - `RandomPort` - Pick a random available port (recommended) /// +/// ## Example +/// +/// Use this anywhere you need a port selection value. +/// +/// ```gleam +/// process.Port(1234) +/// |> should +/// |> be_equal(process.Port(1234)) +/// |> or_fail_with("expected PortSelection to be constructible") +/// ``` +/// pub type PortSelection { /// Use a specific port number. Port(Int) @@ -206,10 +292,13 @@ pub type PortSelection { /// /// ## Example /// +/// Use this in test setup, before starting a server. +/// /// ```gleam -/// let port = unique_port() -/// start_server(port) -/// // port is something like 34521 +/// process.unique_port() +/// |> should +/// |> be_between(10_000, 60_000) +/// |> or_fail_with("expected unique_port to be within 10k..60k") /// ``` /// pub fn unique_port() -> Int { @@ -228,33 +317,27 @@ pub fn unique_port() -> Int { /// - `initial_state` - The actor's starting state /// - `handler` - Function `fn(state, message) -> actor.Next(state, message)` /// +/// ## Returns +/// +/// A `Subject(msg)` you can send messages to (or call with `call_actor`). 
+/// /// ## Example /// /// ```gleam -/// pub type TodoMessage { -/// Add(String) -/// GetAll(Subject(List(String))) -/// } -/// -/// let todos = start_actor([], fn(items, msg) { -/// case msg { -/// Add(item) -> actor.continue([item, ..items]) -/// GetAll(reply) -> { -/// process.send(reply, items) -/// actor.continue(items) -/// } -/// } -/// }) -/// -/// process.send(todos, Add("Write tests")) -/// process.send(todos, Add("Run tests")) -/// let items = call_actor(todos, GetAll, 1000) -/// // items == ["Run tests", "Write tests"] +/// let todos = process.start_actor([], handle_todo_message) +/// +/// erlang_process.send(todos, Add("Write tests")) +/// erlang_process.send(todos, Add("Run tests")) +/// +/// process.call_actor(todos, GetAll, 1000) +/// |> should +/// |> be_equal(["Write tests", "Run tests"]) +/// |> or_fail_with("expected items to be preserved in insertion order") /// ``` /// pub fn start_actor( - initial_state: state, - handler: fn(state, msg) -> actor.Next(state, msg), + initial_state initial_state: state, + handler handler: fn(state, msg) -> actor.Next(state, msg), ) -> Subject(msg) { let assert Ok(started) = actor.new(initial_state) @@ -275,20 +358,23 @@ pub fn start_actor( /// - `make_message` - Function that creates the message given a reply subject /// - `timeout_ms` - How long to wait for a response /// +/// ## Returns +/// +/// The reply value from the actor. 
+/// /// ## Example /// /// ```gleam -/// pub type Msg { -/// GetValue(Subject(Int)) -/// } -/// -/// let value = call_actor(my_actor, GetValue, 1000) +/// process.call_actor(todos, GetAll, 1000) +/// |> should +/// |> be_equal(["Write tests", "Run tests"]) +/// |> or_fail_with("expected items to be preserved in insertion order") /// ``` /// pub fn call_actor( - subject: Subject(msg), - make_message: fn(Subject(reply)) -> msg, - timeout_ms: Int, + subject subject: Subject(msg), + make_message make_message: fn(Subject(reply)) -> msg, + timeout_ms timeout_ms: Int, ) -> reply { actor.call(subject, waiting: timeout_ms, sending: make_message) } @@ -309,8 +395,10 @@ pub fn call_actor( /// ## Example /// /// ```gleam -/// // Check every 100ms for up to 10 seconds -/// PollConfig(timeout_ms: 10_000, interval_ms: 100) +/// process.default_poll_config() +/// |> should +/// |> be_equal(process.PollConfig(timeout_ms: 5000, interval_ms: 50)) +/// |> or_fail_with("expected default_poll_config to be 5000ms/50ms") /// ``` /// pub type PollConfig { @@ -329,6 +417,19 @@ pub type PollConfig { /// /// Good for operations that might take a few seconds. /// +/// ## Returns +/// +/// `PollConfig(timeout_ms: 5000, interval_ms: 50)`. +/// +/// ## Example +/// +/// ```gleam +/// process.default_poll_config() +/// |> should +/// |> be_equal(process.PollConfig(timeout_ms: 5000, interval_ms: 50)) +/// |> or_fail_with("expected default_poll_config to be 5000ms/50ms") +/// ``` +/// pub fn default_poll_config() -> PollConfig { PollConfig(timeout_ms: 5000, interval_ms: 50) } @@ -340,6 +441,19 @@ pub fn default_poll_config() -> PollConfig { /// /// Good for fast local operations like servers starting. /// +/// ## Returns +/// +/// `PollConfig(timeout_ms: 1000, interval_ms: 10)`. 
+/// +/// ## Example +/// +/// ```gleam +/// process.quick_poll_config() +/// |> should +/// |> be_equal(process.PollConfig(timeout_ms: 1000, interval_ms: 10)) +/// |> or_fail_with("expected quick_poll_config to be 1000ms/10ms") +/// ``` +/// pub fn quick_poll_config() -> PollConfig { PollConfig(timeout_ms: 1000, interval_ms: 10) } @@ -348,6 +462,11 @@ pub fn quick_poll_config() -> PollConfig { /// /// Either the condition was met (`Ready`) or we gave up (`TimedOut`). /// +/// ## Constructors +/// +/// - `Ready(value)`: The condition was met. +/// - `TimedOut`: The timeout elapsed. +/// pub type PollResult(a) { /// The condition was met and returned this value. Ready(a) @@ -366,23 +485,28 @@ pub type PollResult(a) { /// - Waiting for a file to appear /// - Waiting for a service to become healthy /// +/// ## Parameters +/// +/// - `config`: Poll timeout + interval. +/// - `check`: A zero-arg function returning `Bool`. +/// +/// ## Returns +/// +/// `Ready(True)` when the check returns `True`, otherwise `TimedOut`. 
+/// /// ## Example /// /// ```gleam -/// // Wait for server to be ready -/// case await_ready(quick_poll_config(), fn() { is_port_open(port) }) { -/// Ready(True) -> { -/// // Server is up, proceed with test -/// make_request(port) -/// |> should() -/// |> be_ok() -/// |> or_fail_with("Request should succeed") -/// } -/// TimedOut -> fail_with("Server didn't start in time") -/// } +/// process.await_ready(process.quick_poll_config(), always_true) +/// |> should +/// |> be_equal(process.Ready(True)) +/// |> or_fail_with("expected await_ready to return Ready(True)") /// ``` /// -pub fn await_ready(config: PollConfig, check: fn() -> Bool) -> PollResult(Bool) { +pub fn await_ready( + config config: PollConfig, + check check: fn() -> Bool, +) -> PollResult(Bool) { poll_until_true(config.timeout_ms, config.interval_ms, check) } @@ -397,24 +521,27 @@ pub fn await_ready(config: PollConfig, check: fn() -> Bool) -> PollResult(Bool) /// - Waiting for an async job to complete /// - Waiting for a resource to become available /// +/// ## Parameters +/// +/// - `config`: Poll timeout + interval. +/// - `check`: A zero-arg function returning `Result(value, error)`. +/// +/// ## Returns +/// +/// `Ready(value)` when the check returns `Ok(value)`, otherwise `TimedOut`. 
+/// /// ## Example /// /// ```gleam -/// // Wait for user to appear in database -/// case await_some(default_poll_config(), fn() { find_user(user_id) }) { -/// Ready(user) -> { -/// user.name -/// |> should() -/// |> equal("Alice") -/// |> or_fail_with("User should be Alice") -/// } -/// TimedOut -> fail_with("User never appeared in database") -/// } +/// process.await_some(process.default_poll_config(), always_ok_42) +/// |> should +/// |> be_equal(process.Ready(42)) +/// |> or_fail_with("expected await_some to return Ready(42)") /// ``` /// pub fn await_some( - config: PollConfig, - check: fn() -> Result(a, e), + config config: PollConfig, + check check: fn() -> Result(a, e), ) -> PollResult(a) { poll_until_ok(config.timeout_ms, config.interval_ms, check) } diff --git a/src/dream_test/reporter/bdd.gleam b/src/dream_test/reporter/bdd.gleam deleted file mode 100644 index 5dc093d..0000000 --- a/src/dream_test/reporter/bdd.gleam +++ /dev/null @@ -1,541 +0,0 @@ -//// BDD-style test reporter for dream_test. -//// -//// This reporter formats test results in a hierarchical, spec-like format -//// that mirrors your `describe`/`it` structure. It's inspired by RSpec, Jest, -//// and Mocha. 
-//// -//// ## Example Output -//// -//// ```text -//// Calculator -//// add -//// ✓ adds positive numbers (1ms) -//// ✓ handles zero -//// subtract -//// ✓ subtracts positive numbers -//// ✗ handles negative results (2ms) -//// equal -//// Message: Should handle negative subtraction -//// Expected: -5 -//// Actual: 5 -//// -//// Summary: 4 run, 1 failed, 3 passed in 3ms -//// ``` -//// -//// ## Usage -//// -//// ```gleam -//// import dream_test/unit.{describe, it, to_test_cases} -//// import dream_test/runner.{run_all} -//// import dream_test/reporter/bdd.{report} -//// import gleam/io -//// -//// pub fn main() { -//// tests() -//// |> to_test_cases("my_test") -//// |> run_all() -//// |> report(io.print) -//// } -//// ``` -//// -//// ## Status Markers -//// -//// | Status | Marker | Meaning | -//// |-------------|--------|--------------------------------| -//// | Passed | ✓ | All assertions succeeded | -//// | Failed | ✗ | One or more assertions failed | -//// | Skipped | - | Test was skipped | -//// | Pending | ~ | Test is a placeholder | -//// | TimedOut | ! | Test exceeded timeout | -//// | SetupFailed | ⚠ | A setup hook failed | - -import dream_test/reporter/gherkin as gherkin_reporter -import dream_test/timing -import dream_test/types.{ - type AssertionFailure, type Status, type TestResult, EqualityFailure, Failed, - Passed, Pending, SetupFailed, Skipped, SnapshotFailure, TimedOut, -} -import gleam/int -import gleam/list -import gleam/option.{Some} -import gleam/order -import gleam/string - -/// Format test results as a BDD-style report string. -/// -/// Returns the complete report including: -/// - Hierarchical test results with status markers -/// - Failure details with messages and diffs -/// - Summary line with counts -/// -/// Gherkin tests are automatically formatted using the Gherkin reporter style. -/// -/// Use this when you need the report as a string (e.g., for testing the -/// reporter itself or writing to a file). 
-/// -/// ## Example -/// -/// ```gleam -/// let report_string = format(results) -/// file.write("test-results.txt", report_string) -/// ``` -/// -pub fn format(results: List(TestResult)) -> String { - // Split results by kind - let #(gherkin_results, unit_results) = partition_by_kind(results) - - // Format each group with appropriate reporter - let unit_text = format_unit_results(unit_results) - let gherkin_text = format_gherkin_results(gherkin_results) - - // Combine with single summary - let summary_text = format_summary(results) - string.concat([unit_text, gherkin_text, "\n", summary_text]) -} - -fn partition_by_kind( - results: List(TestResult), -) -> #(List(TestResult), List(TestResult)) { - list.partition(results, gherkin_reporter.is_gherkin_result) -} - -fn format_unit_results(results: List(TestResult)) -> String { - case results { - [] -> "" - _ -> { - // Sort results by full_name to group tests from the same describe block together. - // This ensures consistent output regardless of parallel execution order. 
- let sorted = list.sort(results, compare_by_full_name) - format_all_results(sorted, [], "") - } - } -} - -fn compare_by_full_name(a: TestResult, b: TestResult) -> order.Order { - compare_string_lists(a.full_name, b.full_name) -} - -fn compare_string_lists(a: List(String), b: List(String)) -> order.Order { - case a, b { - [], [] -> order.Eq - [], _ -> order.Lt - _, [] -> order.Gt - [head_a, ..rest_a], [head_b, ..rest_b] -> - case string.compare(head_a, head_b) { - order.Eq -> compare_string_lists(rest_a, rest_b) - other -> other - } - } -} - -fn format_gherkin_results(results: List(TestResult)) -> String { - case results { - [] -> "" - _ -> { - let formatted = gherkin_reporter.format(results) - // Remove the gherkin reporter's own summary (we'll use combined summary) - remove_summary_line(formatted) - } - } -} - -fn remove_summary_line(text: String) -> String { - // Find and remove the summary line (may have trailing empty lines) - let lines = string.split(text, "\n") - let without_summary = remove_gherkin_summary(lines, []) - string.join(without_summary, "\n") -} - -fn remove_gherkin_summary( - lines: List(String), - accumulated: List(String), -) -> List(String) { - case lines { - [] -> list.reverse(accumulated) - [line] -> { - // Check if this is the summary line or trailing empty line - case string.starts_with(line, "Summary:") || line == "" { - True -> list.reverse(accumulated) - False -> list.reverse([line, ..accumulated]) - } - } - [line, ..rest] -> { - // Skip summary lines anywhere in the list - case string.starts_with(line, "Summary:") { - True -> remove_gherkin_summary(rest, accumulated) - False -> remove_gherkin_summary(rest, [line, ..accumulated]) - } - } - } -} - -/// Print test results using a provided writer function. -/// -/// This is the main entry point for most test runs. The writer function -/// receives the formatted report string and can print it, log it, or -/// handle it however needed. 
-/// -/// ## Example -/// -/// ```gleam -/// // Print to stdout -/// results |> report(io.print) -/// -/// // Print each line separately (for flushing) -/// results |> report(io.println) -/// -/// // Custom handling -/// results |> report(fn(s) { logger.info(s) }) -/// ``` -/// -/// ## Parameters -/// -/// - `results` - List of test results from the runner -/// - `write` - Function that handles the formatted output string -/// -/// ## Returns -/// -/// Returns the input results unchanged, enabling pipeline composition: -/// -/// ```gleam -/// to_test_cases("my_test", tests()) -/// |> run_all() -/// |> report(io.print) -/// |> exit_on_failure() -/// ``` -/// -pub fn report( - results: List(TestResult), - write: fn(String) -> Nil, -) -> List(TestResult) { - write(format(results)) - results -} - -fn format_all_results( - results: List(TestResult), - previous_path: List(String), - accumulated: String, -) -> String { - case results { - [] -> accumulated - [result, ..rest] -> { - let formatted = format_one_result(result, previous_path) - let updated = string.concat([accumulated, formatted]) - let new_path = extract_describe_segments(result.full_name) - format_all_results(rest, new_path, updated) - } - } -} - -fn format_one_result(result: TestResult, previous_path: List(String)) -> String { - let current_path = extract_describe_segments(result.full_name) - let common_depth = count_common_prefix(previous_path, current_path, 0) - let new_segments = list.drop(current_path, common_depth) - let headers = format_header_segments(new_segments, common_depth, "") - let test_line = format_test_line(result) - string.concat([headers, test_line]) -} - -fn count_common_prefix( - previous: List(String), - current: List(String), - depth: Int, -) -> Int { - case previous, current { - [prev_head, ..prev_rest], [curr_head, ..curr_rest] -> - count_common_prefix_check( - prev_head, - curr_head, - prev_rest, - curr_rest, - depth, - ) - _, _ -> depth - } -} - -fn count_common_prefix_check( - 
prev_head: String, - curr_head: String, - prev_rest: List(String), - curr_rest: List(String), - depth: Int, -) -> Int { - case prev_head == curr_head { - True -> count_common_prefix(prev_rest, curr_rest, depth + 1) - False -> depth - } -} - -fn extract_describe_segments(full_name: List(String)) -> List(String) { - case list.reverse(full_name) { - [] -> [] - [_] -> [] - [_, ..rest] -> list.reverse(rest) - } -} - -fn format_header_segments( - segments: List(String), - depth: Int, - accumulated: String, -) -> String { - case segments { - [] -> accumulated - [segment, ..rest] -> { - let indent = build_indent(depth) - let header = string.concat([indent, segment, "\n"]) - let updated = string.concat([accumulated, header]) - format_header_segments(rest, depth + 1, updated) - } - } -} - -fn format_test_line(result: TestResult) -> String { - let depth = calculate_test_depth(result.full_name) - let indent = build_indent(depth) - let marker = status_marker(result.status) - let name = extract_test_name(result.full_name) - let duration = format_duration(result.duration_ms) - let test_line = string.concat([indent, marker, " ", name, duration, "\n"]) - let failure_text = format_failure_details(result, depth) - string.concat([test_line, failure_text]) -} - -fn format_duration(duration_ms: Int) -> String { - case duration_ms { - // Don't show timing for very fast tests (< 1ms) - ms if ms <= 0 -> "" - ms -> " (" <> timing.format_duration_ms(ms) <> ")" - } -} - -fn calculate_test_depth(full_name: List(String)) -> Int { - case full_name { - [] -> 0 - [_] -> 0 - _ -> list.length(full_name) - 1 - } -} - -fn build_indent(level: Int) -> String { - build_indent_recursive(level, "") -} - -fn build_indent_recursive(level: Int, accumulated: String) -> String { - case level { - 0 -> accumulated - n -> build_indent_recursive(n - 1, string.concat([accumulated, " "])) - } -} - -fn extract_test_name(full_name: List(String)) -> String { - case list.reverse(full_name) { - [last, ..] 
-> last - [] -> "" - } -} - -fn status_marker(status: Status) -> String { - case status { - Passed -> "✓" - Failed -> "✗" - Skipped -> "-" - Pending -> "~" - TimedOut -> "!" - SetupFailed -> "⚠" - } -} - -fn format_failure_details(result: TestResult, indent_level: Int) -> String { - case result.status { - Failed -> format_all_failures(result.failures, indent_level, "") - SetupFailed -> format_all_failures(result.failures, indent_level, "") - _ -> "" - } -} - -fn format_all_failures( - failures: List(AssertionFailure), - indent_level: Int, - accumulated: String, -) -> String { - case failures { - [] -> accumulated - [failure, ..rest] -> { - let formatted = format_one_failure(failure, indent_level) - let updated = string.concat([accumulated, formatted]) - format_all_failures(rest, indent_level, updated) - } - } -} - -fn format_one_failure(failure: AssertionFailure, indent_level: Int) -> String { - let base_indent = build_indent(indent_level) - - let header = string.concat([base_indent, " ", failure.operator, "\n"]) - let message_text = format_failure_message(failure.message, base_indent) - let payload_text = format_failure_payload(failure.payload, base_indent) - - string.concat([header, message_text, payload_text]) -} - -fn format_failure_message(message: String, base_indent: String) -> String { - case message { - "" -> "" - _ -> string.concat([base_indent, " Message: ", message, "\n"]) - } -} - -fn format_failure_payload( - payload: option.Option(types.FailurePayload), - base_indent: String, -) -> String { - case payload { - Some(EqualityFailure(actual, expected)) -> - string.concat([ - base_indent, - " Expected: ", - expected, - "\n", - base_indent, - " Actual: ", - actual, - "\n", - ]) - Some(SnapshotFailure(actual, expected, snapshot_path, is_missing)) -> - format_snapshot_failure( - actual, - expected, - snapshot_path, - is_missing, - base_indent, - ) - _ -> "" - } -} - -fn format_snapshot_failure( - actual: String, - expected: String, - snapshot_path: String, - 
is_missing: Bool, - base_indent: String, -) -> String { - case is_missing { - True -> - string.concat([ - base_indent, - " Snapshot missing: ", - snapshot_path, - "\n", - ]) - False -> - string.concat([ - base_indent, - " Snapshot: ", - snapshot_path, - "\n", - base_indent, - " Expected: ", - expected, - "\n", - base_indent, - " Actual: ", - actual, - "\n", - ]) - } -} - -fn format_summary(results: List(TestResult)) -> String { - let total = list.length(results) - let failed = count_by_status(results, Failed) - let skipped = count_by_status(results, Skipped) - let pending = count_by_status(results, Pending) - let timed_out = count_by_status(results, TimedOut) - let setup_failed = count_by_status(results, SetupFailed) - let passed = total - failed - skipped - pending - timed_out - setup_failed - let total_duration = sum_durations(results, 0) - - string.concat([ - "Summary: ", - int.to_string(total), - " run, ", - int.to_string(failed), - " failed, ", - int.to_string(passed), - " passed", - build_summary_suffix(skipped, pending, timed_out, setup_failed), - " in ", - timing.format_duration_ms(total_duration), - "\n", - ]) -} - -fn sum_durations(results: List(TestResult), total: Int) -> Int { - case results { - [] -> total - [result, ..rest] -> sum_durations(rest, total + result.duration_ms) - } -} - -fn count_by_status(results: List(TestResult), wanted: Status) -> Int { - count_matching_status(results, wanted, 0) -} - -fn count_matching_status( - results: List(TestResult), - wanted: Status, - count: Int, -) -> Int { - case results { - [] -> count - [result, ..rest] -> { - let next_count = increment_if_matches(result.status, wanted, count) - count_matching_status(rest, wanted, next_count) - } - } -} - -fn increment_if_matches(status: Status, wanted: Status, count: Int) -> Int { - case status == wanted { - True -> count + 1 - False -> count - } -} - -fn build_summary_suffix( - skipped: Int, - pending: Int, - timed_out: Int, - setup_failed: Int, -) -> String { - let 
parts = - [] - |> add_summary_part_if_nonzero(skipped, " skipped") - |> add_summary_part_if_nonzero(pending, " pending") - |> add_summary_part_if_nonzero(timed_out, " timed out") - |> add_summary_part_if_nonzero(setup_failed, " setup failed") - - format_summary_parts(parts) -} - -fn format_summary_parts(parts: List(String)) -> String { - case parts { - [] -> "" - _ -> string.concat([", ", string.join(parts, ", ")]) - } -} - -fn add_summary_part_if_nonzero( - parts: List(String), - count: Int, - label: String, -) -> List(String) { - case count { - 0 -> parts - _ -> [string.concat([int.to_string(count), label]), ..parts] - } -} diff --git a/src/dream_test/reporters/bdd.gleam b/src/dream_test/reporters/bdd.gleam new file mode 100644 index 0000000..573cf16 --- /dev/null +++ b/src/dream_test/reporters/bdd.gleam @@ -0,0 +1,1045 @@ +//// BDD-style test report formatting. +//// +//// This module formats `TestResult` values in a hierarchical, spec-like layout +//// that mirrors your `describe` / `it` structure. It’s primarily used by +//// the end-of-run BDD results reporter (`runner.results_reporters([bdd.new()])`), +//// but it’s also useful directly when you want formatted output as a `String` +//// (e.g. snapshot tests) or when you’re formatting results incrementally in a +//// custom runner. + +import dream_test/reporters/gherkin as gherkin_reporter +import dream_test/reporters/types as reporter_types +import dream_test/timing +import dream_test/types.{ + type AssertionFailure, type Status, type TestResult, BooleanFailure, + CollectionFailure, ComparisonFailure, CustomMatcherFailure, EqualityFailure, + Failed, OptionFailure, Passed, Pending, ResultFailure, SetupFailed, Skipped, + SnapshotFailure, StringMatchFailure, TimedOut, +} +import gleam/int +import gleam/list +import gleam/option.{Some} +import gleam/order +import gleam/string + +/// The result of formatting a single test result incrementally. 
+/// +/// ## Fields +/// +/// - `text`: The output to print for this test result (including any new suite/group headers) +/// - `new_path`: The describe/group path for this result. Pass this as `previous_path` +/// when formatting the next result. +pub type FormatIncrementalResult { + FormatIncrementalResult(text: String, new_path: List(String)) +} + +/// The result of formatting a single test result incrementally as separate parts. +/// +/// ## Fields +/// +/// - `headers`: Any new suite/group lines required for this result +/// - `test_line`: The test line (and any failure details) for this result +/// - `new_path`: The describe/group path for this result. Pass this as `previous_path` +/// when formatting the next result. +pub type FormatIncrementalPartsResult { + FormatIncrementalPartsResult( + headers: String, + test_line: String, + new_path: List(String), + ) +} + +// ============================================================================ +// Results reporter builder (end-of-run) +// ============================================================================ + +/// Create a BDD results reporter. +/// +/// This reporter prints at the end of the run, using the traversal-ordered results +/// provided by the runner. +pub fn new() -> reporter_types.ResultsReporter { + reporter_types.Bdd(reporter_types.BddReporterConfig( + color: False, + mode: reporter_types.BddFull, + )) +} + +/// Enable ANSI color output for the BDD report. +pub fn color( + reporter reporter: reporter_types.ResultsReporter, +) -> reporter_types.ResultsReporter { + case reporter { + reporter_types.Bdd(reporter_types.BddReporterConfig(color: _c, mode: mode)) -> + reporter_types.Bdd(reporter_types.BddReporterConfig( + color: True, + mode: mode, + )) + other -> other + } +} + +/// Print only the summary line at the end of the run. 
+pub fn summary_only( + reporter reporter: reporter_types.ResultsReporter, +) -> reporter_types.ResultsReporter { + case reporter { + reporter_types.Bdd(reporter_types.BddReporterConfig(color: color, mode: _)) -> + reporter_types.Bdd(reporter_types.BddReporterConfig( + color: color, + mode: reporter_types.BddSummaryOnly, + )) + other -> other + } +} + +/// Print only repeated failures and the summary line at the end of the run. +pub fn failures_only( + reporter reporter: reporter_types.ResultsReporter, +) -> reporter_types.ResultsReporter { + case reporter { + reporter_types.Bdd(reporter_types.BddReporterConfig(color: color, mode: _)) -> + reporter_types.Bdd(reporter_types.BddReporterConfig( + color: color, + mode: reporter_types.BddFailuresOnly, + )) + other -> other + } +} + +/// Render the end-of-run BDD output for the given results. +pub fn render( + config config: reporter_types.BddReporterConfig, + results results: List(TestResult), +) -> String { + let reporter_types.BddReporterConfig(color: color, mode: mode) = config + render_bdd_sections(color, mode, results) +} + +fn render_bdd_sections( + color: Bool, + mode: reporter_types.BddOutputMode, + results: List(TestResult), +) -> String { + let summary = format_summary(color, results) + + case mode { + reporter_types.BddSummaryOnly -> summary + reporter_types.BddFailuresOnly -> { + let failures = filter_failures(results, []) + let failures_text = format_all_results(color, failures, [], "") + string.concat([failures_text, "\n", summary]) + } + reporter_types.BddFull -> { + let results_text = format_all_results(color, results, [], "") + let failures = filter_failures(results, []) + let failures_text = case failures { + [] -> "" + _ -> + "\n" + <> format_failures_header(color) + <> "\n" + <> format_all_results(color, failures, [], "") + } + string.concat([results_text, failures_text, "\n", summary]) + } + } +} + +fn filter_failures( + results: List(TestResult), + acc_rev: List(TestResult), +) -> List(TestResult) 
{ + case results { + [] -> list.reverse(acc_rev) + [result, ..rest] -> { + case is_failure_status(result.status) { + True -> filter_failures(rest, [result, ..acc_rev]) + False -> filter_failures(rest, acc_rev) + } + } + } +} + +fn is_failure_status(status: Status) -> Bool { + case status { + Failed -> True + SetupFailed -> True + TimedOut -> True + _ -> False + } +} + +fn ansi_wrap(code: String, text: String) -> String { + string.concat(["\u{1b}[", code, "m", text, "\u{1b}[0m"]) +} + +fn ansi_dim(text: String) -> String { + ansi_wrap("2", text) +} + +fn ansi_red(text: String) -> String { + ansi_wrap("31", text) +} + +fn ansi_green(text: String) -> String { + ansi_wrap("32", text) +} + +fn ansi_yellow(text: String) -> String { + ansi_wrap("33", text) +} + +fn ansi_cyan(text: String) -> String { + ansi_wrap("36", text) +} + +fn ansi_magenta(text: String) -> String { + ansi_wrap("35", text) +} + +fn ansi_bold(text: String) -> String { + ansi_wrap("1", text) +} + +fn maybe(color: Bool, apply: fn(String) -> String, text: String) -> String { + case color { + True -> apply(text) + False -> text + } +} + +fn format_failures_header(color: Bool) -> String { + maybe(color, fn(t) { ansi_bold(ansi_red(t)) }, "Failures:") +} + +/// Format test results as a BDD-style report string. +/// +/// Returns the complete report including: +/// - Hierarchical test results with status markers +/// - Failure details with messages and diffs +/// - Summary line with counts +/// +/// Gherkin tests are automatically formatted using the Gherkin reporter style. +/// +/// Use this when you need the report as a string (e.g., for testing the +/// reporter itself or writing to a file). +/// +/// ## Parameters +/// +/// - `results`: The test results to format +/// +/// ## Returns +/// +/// A single `String` containing the formatted report and trailing summary line. 
+/// +/// ## Example +/// +/// ```gleam +/// let report = bdd.format(sample_results()) +/// +/// report +/// |> should +/// |> match_snapshot("./test/snapshots/bdd_format_report.snap") +/// |> or_fail_with("expected formatted report snapshot match") +/// ``` +/// +pub fn format(results results: List(TestResult)) -> String { + // Split results by kind + let #(gherkin_results, unit_results) = partition_by_kind(results) + + // Format each group with appropriate reporter + let unit_text = format_unit_results(False, unit_results) + let gherkin_text = format_gherkin_results(gherkin_results) + + // Combine with single summary + let summary_text = format_summary(False, results) + string.concat([unit_text, gherkin_text, "\n", summary_text]) +} + +fn partition_by_kind( + results: List(TestResult), +) -> #(List(TestResult), List(TestResult)) { + list.partition(results, gherkin_reporter.is_gherkin_result) +} + +fn format_unit_results(color: Bool, results: List(TestResult)) -> String { + case results { + [] -> "" + _ -> { + // Sort results by full_name to group tests from the same describe block together. + // This ensures consistent output regardless of parallel execution order. 
+ let sorted = list.sort(results, compare_by_full_name) + format_all_results(color, sorted, [], "") + } + } +} + +fn compare_by_full_name(a: TestResult, b: TestResult) -> order.Order { + compare_string_lists(a.full_name, b.full_name) +} + +fn compare_string_lists(a: List(String), b: List(String)) -> order.Order { + case a, b { + [], [] -> order.Eq + [], _ -> order.Lt + _, [] -> order.Gt + [head_a, ..rest_a], [head_b, ..rest_b] -> + case string.compare(head_a, head_b) { + order.Eq -> compare_string_lists(rest_a, rest_b) + other -> other + } + } +} + +fn format_gherkin_results(results: List(TestResult)) -> String { + case results { + [] -> "" + _ -> { + let formatted = gherkin_reporter.format(results) + // Remove the gherkin reporter's own summary (we'll use combined summary) + remove_summary_line(formatted) + } + } +} + +fn remove_summary_line(text: String) -> String { + // Find and remove the summary line (may have trailing empty lines) + let lines = string.split(text, "\n") + let without_summary = remove_gherkin_summary(lines, []) + string.join(without_summary, "\n") +} + +fn remove_gherkin_summary( + lines: List(String), + accumulated: List(String), +) -> List(String) { + case lines { + [] -> list.reverse(accumulated) + [line] -> { + // Check if this is the summary line or trailing empty line + case string.starts_with(line, "Summary:") || line == "" { + True -> list.reverse(accumulated) + False -> list.reverse([line, ..accumulated]) + } + } + [line, ..rest] -> { + // Skip summary lines anywhere in the list + case string.starts_with(line, "Summary:") { + True -> remove_gherkin_summary(rest, accumulated) + False -> remove_gherkin_summary(rest, [line, ..accumulated]) + } + } + } +} + +/// Print test results using a provided writer function. +/// +/// This is the main entry point for most test runs. The writer function +/// receives the formatted report string and can print it, log it, or +/// handle it however needed. 
+/// +/// ## Example +/// +/// ```gleam +/// bdd.report([passing_result()], write_bdd_report_to_file) +/// +/// use text <- result.try( +/// file.read("test/tmp/bdd_report.txt") +/// |> result.map_error(file.error_to_string), +/// ) +/// +/// text +/// |> should +/// |> match_snapshot("./test/snapshots/bdd_report_file_output.snap") +/// |> or_fail_with("expected report output snapshot match") +/// ``` +/// +/// ## Parameters +/// +/// - `results`: List of test results from the runner +/// - `write`: Function that handles the formatted output string +/// +/// ## Returns +/// +/// Returns the input results unchanged, enabling pipeline composition. +/// +pub fn report( + results results: List(TestResult), + write write: fn(String) -> Nil, +) -> List(TestResult) { + write(format(results)) + results +} + +/// Format a single test result as BDD output, suitable for streaming. +/// +/// This is designed to be used with `ReporterEvent.TestFinished` so that BDD +/// output can be printed as tests complete. +/// +/// - `previous_path` should be the describe-path (all but the leaf test name) +/// of the previously printed result. +/// - Returns the formatted output for this result, and the updated +/// describe-path to use as `previous_path` for the next call. +/// +/// ## Parameters +/// +/// - `result`: The test result to format +/// - `previous_path`: The previous describe/group path (from the prior call) +/// +/// ## Returns +/// +/// A `FormatIncrementalResult` containing the text to print and the new +/// describe/group path to pass as `previous_path` for the next call. 
+pub fn format_incremental( + result result: TestResult, + previous_path previous_path: List(String), +) -> FormatIncrementalResult { + let text = format_one_result_with_test_indent(False, result, previous_path, 0) + let new_path = extract_describe_segments(result.full_name) + FormatIncrementalResult(text: text, new_path: new_path) +} + +/// ## Example +/// +/// ```gleam +/// use first <- result.try(first_result([passing_result()])) +/// +/// let bdd.FormatIncrementalResult(text: _text, new_path: new_path) = +/// bdd.format_incremental(first, []) +/// +/// new_path +/// |> should +/// |> be_equal(["Example Suite"]) +/// |> or_fail_with("expected new_path to be the describe path") +/// ``` +/// Format a single test result as BDD output, allowing an extra indent level +/// for the test line. +/// +/// ## Parameters +/// +/// - `result`: The test result to format +/// - `previous_path`: The previous describe/group path (from the prior call) +/// - `extra_test_indent`: Additional indentation to apply to the test line +/// +/// ## Returns +/// +/// A `FormatIncrementalResult` containing the text to print and the new +/// describe/group path to pass as `previous_path` for the next call. 
+pub fn format_incremental_with_test_indent( + result result: TestResult, + previous_path previous_path: List(String), + extra_test_indent extra_test_indent: Int, +) -> FormatIncrementalResult { + let text = + format_one_result_with_test_indent( + False, + result, + previous_path, + extra_test_indent, + ) + let new_path = extract_describe_segments(result.full_name) + FormatIncrementalResult(text: text, new_path: new_path) +} + +/// ## Example +/// +/// ```gleam +/// let result = passing_result() +/// let bdd.FormatIncrementalResult(text: text, new_path: _new_path) = +/// bdd.format_incremental_with_test_indent(result, [], 0) +/// +/// text +/// |> should +/// |> match_snapshot( +/// "./test/snapshots/bdd_format_incremental_with_test_indent.snap", +/// ) +/// |> or_fail_with("expected incremental output snapshot match") +/// ``` +/// Format a single incremental result as two parts: +/// - `headers`: any new describe/group lines required for this result +/// - `test_line`: the test line (and any failure details) +/// +/// This allows callers to insert additional lines (like lifecycle hooks) +/// between headers and the test line while maintaining correct ordering. 
///
/// ## Parameters
///
/// - `result`: The test result to format
/// - `previous_path`: The previous describe/group path (from the prior call)
/// - `extra_test_indent`: Additional indentation to apply to the test line
///
/// ## Returns
///
/// A `FormatIncrementalPartsResult` containing:
/// - `headers`: any new describe/group lines required for this result
/// - `test_line`: the test line (and any failure details)
/// - `new_path`: the updated describe/group path to pass as `previous_path` for the next call
///
/// ## Example
///
/// ```gleam
/// let result = passing_result()
/// let text =
///   bdd.format_incremental_parts_with_test_indent(result, [], 0)
///   |> incremental_parts_text
///
/// text
/// |> should
/// |> match_snapshot(
///   "./test/snapshots/bdd_format_incremental_parts_with_test_indent.snap",
/// )
/// |> or_fail_with("expected incremental parts snapshot match")
/// ```
pub fn format_incremental_parts_with_test_indent(
  result result: TestResult,
  previous_path previous_path: List(String),
  extra_test_indent extra_test_indent: Int,
) -> FormatIncrementalPartsResult {
  // Split formatting so callers can interleave their own lines (for example
  // lifecycle-hook output) between the group headers and the test line.
  let #(headers, test_line) =
    format_one_result_parts_with_test_indent(
      False,
      result,
      previous_path,
      extra_test_indent,
    )
  // The describe path of this result becomes `previous_path` for the next call.
  let new_path = extract_describe_segments(result.full_name)
  FormatIncrementalPartsResult(
    headers: headers,
    test_line: test_line,
    new_path: new_path,
  )
}

/// Format only the trailing summary line (no per-test output).
///
/// ## Parameters
///
/// - `results`: The test results to summarize
///
/// ## Returns
///
/// A single summary line as a `String` (including a trailing newline).
+pub fn format_summary_only(results results: List(TestResult)) -> String { + format_summary(False, results) +} + +/// ## Example +/// +/// ```gleam +/// let summary = bdd.format_summary_only([passing_result()]) +/// +/// summary +/// |> should +/// |> match_snapshot("./test/snapshots/bdd_format_summary_only.snap") +/// |> or_fail_with("expected summary snapshot match") +/// ``` +fn format_all_results( + color: Bool, + results: List(TestResult), + previous_path: List(String), + accumulated: String, +) -> String { + case results { + [] -> accumulated + [result, ..rest] -> { + let formatted = format_one_result(color, result, previous_path) + let updated = string.concat([accumulated, formatted]) + let new_path = extract_describe_segments(result.full_name) + format_all_results(color, rest, new_path, updated) + } + } +} + +fn format_one_result_with_test_indent( + color: Bool, + result: TestResult, + previous_path: List(String), + extra_test_indent: Int, +) -> String { + let #(headers, test_line) = + format_one_result_parts_with_test_indent( + color, + result, + previous_path, + extra_test_indent, + ) + string.concat([headers, test_line]) +} + +fn format_one_result( + color: Bool, + result: TestResult, + previous_path: List(String), +) -> String { + format_one_result_with_test_indent(color, result, previous_path, 0) +} + +fn format_one_result_parts_with_test_indent( + color: Bool, + result: TestResult, + previous_path: List(String), + extra_test_indent: Int, +) -> #(String, String) { + let current_path = extract_describe_segments(result.full_name) + let common_depth = count_common_prefix(previous_path, current_path, 0) + let new_segments = list.drop(current_path, common_depth) + let is_gherkin = gherkin_reporter.is_gherkin_result(result) + let headers = + format_header_segments_for_kind( + color, + new_segments, + common_depth, + is_gherkin, + "", + ) + let test_line = + format_test_line_with_indent_for_kind( + color, + result, + extra_test_indent, + is_gherkin, + ) + 
#(headers, test_line) +} + +fn format_header_segments_for_kind( + color: Bool, + segments: List(String), + depth: Int, + is_gherkin: Bool, + accumulated: String, +) -> String { + case segments { + [] -> accumulated + [segment, ..rest] -> { + let indent = build_indent(depth) + let label = case is_gherkin && depth == 0 { + True -> "Feature: " <> segment + False -> segment + } + let styled = maybe(color, fn(t) { ansi_bold(ansi_cyan(t)) }, label) + let header = string.concat([indent, styled, "\n"]) + let updated = string.concat([accumulated, header]) + format_header_segments_for_kind( + color, + rest, + depth + 1, + is_gherkin, + updated, + ) + } + } +} + +fn count_common_prefix( + previous: List(String), + current: List(String), + depth: Int, +) -> Int { + case previous, current { + [prev_head, ..prev_rest], [curr_head, ..curr_rest] -> + count_common_prefix_check( + prev_head, + curr_head, + prev_rest, + curr_rest, + depth, + ) + _, _ -> depth + } +} + +fn count_common_prefix_check( + prev_head: String, + curr_head: String, + prev_rest: List(String), + curr_rest: List(String), + depth: Int, +) -> Int { + case prev_head == curr_head { + True -> count_common_prefix(prev_rest, curr_rest, depth + 1) + False -> depth + } +} + +fn extract_describe_segments(full_name: List(String)) -> List(String) { + case list.reverse(full_name) { + [] -> [] + [_] -> [] + [_, ..rest] -> list.reverse(rest) + } +} + +fn format_test_line_with_indent_for_kind( + color: Bool, + result: TestResult, + extra_indent: Int, + is_gherkin: Bool, +) -> String { + let depth = calculate_test_depth(result.full_name) + let indent = build_indent(depth + extra_indent) + let marker = status_marker(color, result.status) + let name = case is_gherkin { + True -> "Scenario: " <> extract_test_name(result.full_name) + False -> extract_test_name(result.full_name) + } + let duration = format_duration(result.duration_ms) + let colored_name = case result.status { + Passed -> name + Failed -> maybe(color, ansi_red, name) 
+ SetupFailed -> maybe(color, ansi_red, name) + TimedOut -> maybe(color, ansi_red, name) + Skipped -> maybe(color, ansi_yellow, name) + Pending -> maybe(color, ansi_yellow, name) + } + let test_line = + string.concat([indent, marker, " ", colored_name, duration, "\n"]) + let failure_text = format_failure_details(color, result, depth + extra_indent) + string.concat([test_line, failure_text]) +} + +fn format_duration(duration_ms: Int) -> String { + case duration_ms { + // Don't show timing for very fast tests (< 1ms) + ms if ms <= 0 -> "" + ms -> " (" <> timing.format_duration_ms(ms) <> ")" + } +} + +fn calculate_test_depth(full_name: List(String)) -> Int { + case full_name { + [] -> 0 + [_] -> 0 + _ -> list.length(full_name) - 1 + } +} + +fn build_indent(level: Int) -> String { + build_indent_recursive(level, "") +} + +fn build_indent_recursive(level: Int, accumulated: String) -> String { + case level { + 0 -> accumulated + n -> build_indent_recursive(n - 1, string.concat([accumulated, " "])) + } +} + +fn extract_test_name(full_name: List(String)) -> String { + case list.reverse(full_name) { + [last, ..] 
-> last + [] -> "" + } +} + +fn status_marker(color: Bool, status: Status) -> String { + case status { + Passed -> maybe(color, ansi_green, "✓") + Failed -> maybe(color, ansi_red, "✗") + Skipped -> maybe(color, ansi_yellow, "-") + Pending -> maybe(color, ansi_yellow, "~") + TimedOut -> maybe(color, ansi_red, "!") + SetupFailed -> maybe(color, ansi_red, "⚠") + } +} + +fn format_failure_details( + color: Bool, + result: TestResult, + indent_level: Int, +) -> String { + case result.status { + Failed -> format_all_failures(color, result.failures, indent_level, "") + SetupFailed -> format_all_failures(color, result.failures, indent_level, "") + TimedOut -> format_all_failures(color, result.failures, indent_level, "") + _ -> "" + } +} + +fn format_all_failures( + color: Bool, + failures: List(AssertionFailure), + indent_level: Int, + accumulated: String, +) -> String { + case failures { + [] -> accumulated + [failure, ..rest] -> { + let formatted = format_one_failure(color, failure, indent_level) + let updated = string.concat([accumulated, formatted]) + format_all_failures(color, rest, indent_level, updated) + } + } +} + +fn format_one_failure( + color: Bool, + failure: AssertionFailure, + indent_level: Int, +) -> String { + let base_indent = build_indent(indent_level) + + let operator_text = + maybe(color, fn(t) { ansi_bold(ansi_magenta(t)) }, failure.operator) + let header = string.concat([base_indent, " ", operator_text, "\n"]) + let message_text = format_failure_message(color, failure.message, base_indent) + let payload_text = format_failure_payload(color, failure.payload, base_indent) + + string.concat([header, message_text, payload_text]) +} + +fn format_failure_message( + color: Bool, + message: String, + base_indent: String, +) -> String { + case message { + "" -> "" + _ -> { + let label = maybe(color, ansi_dim, " Message: ") + let msg = maybe(color, ansi_red, message) + string.concat([base_indent, label, msg, "\n"]) + } + } +} + +fn format_failure_payload( + 
color: Bool, + payload: option.Option(types.FailurePayload), + base_indent: String, +) -> String { + case payload { + Some(BooleanFailure(actual, expected)) -> { + let expected_text = case expected { + True -> "True" + False -> "False" + } + let actual_text = case actual { + True -> "True" + False -> "False" + } + format_expected_actual(color, base_indent, expected_text, actual_text) + } + Some(EqualityFailure(actual, expected)) -> + format_expected_actual(color, base_indent, expected, actual) + Some(OptionFailure(actual, expected_some)) -> { + let expected_text = case expected_some { + True -> "Some(_)" + False -> "None" + } + format_expected_actual(color, base_indent, expected_text, actual) + } + Some(ResultFailure(actual, expected_ok)) -> { + let expected_text = case expected_ok { + True -> "Ok(_)" + False -> "Error(_)" + } + format_expected_actual(color, base_indent, expected_text, actual) + } + Some(CollectionFailure(actual, expected, operation)) -> { + let op_line = + base_indent + <> maybe(color, ansi_dim, " Operation: ") + <> maybe(color, ansi_cyan, operation) + <> "\n" + op_line <> format_expected_actual(color, base_indent, expected, actual) + } + Some(ComparisonFailure(actual, expected, operator)) -> { + let op_line = + base_indent + <> maybe(color, ansi_dim, " Operator: ") + <> maybe(color, ansi_cyan, operator) + <> "\n" + op_line <> format_expected_actual(color, base_indent, expected, actual) + } + Some(StringMatchFailure(actual, pattern, operation)) -> { + let op_line = + base_indent + <> maybe(color, ansi_dim, " Operation: ") + <> maybe(color, ansi_cyan, operation) + <> "\n" + let pat_line = + base_indent + <> maybe(color, ansi_dim, " Pattern: ") + <> maybe(color, ansi_green, pattern) + <> "\n" + let actual_line = + base_indent + <> maybe(color, ansi_dim, " Actual: ") + <> maybe(color, ansi_red, actual) + <> "\n" + op_line <> pat_line <> actual_line + } + Some(SnapshotFailure(actual, expected, snapshot_path, is_missing)) -> + format_snapshot_failure( 
+ color, + actual, + expected, + snapshot_path, + is_missing, + base_indent, + ) + Some(CustomMatcherFailure(actual, description)) -> { + let desc_line = + base_indent + <> maybe(color, ansi_dim, " Description: ") + <> maybe(color, ansi_cyan, description) + <> "\n" + let actual_line = + base_indent + <> maybe(color, ansi_dim, " Actual: ") + <> maybe(color, ansi_red, actual) + <> "\n" + desc_line <> actual_line + } + _ -> "" + } +} + +fn format_expected_actual( + color: Bool, + base_indent: String, + expected: String, + actual: String, +) -> String { + let expected_label = maybe(color, ansi_dim, " Expected: ") + let actual_label = maybe(color, ansi_dim, " Actual: ") + string.concat([ + base_indent, + expected_label, + maybe(color, ansi_green, expected), + "\n", + base_indent, + actual_label, + maybe(color, ansi_red, actual), + "\n", + ]) +} + +fn format_snapshot_failure( + color: Bool, + actual: String, + expected: String, + snapshot_path: String, + is_missing: Bool, + base_indent: String, +) -> String { + case is_missing { + True -> + string.concat([ + base_indent, + maybe(color, ansi_dim, " Snapshot missing: "), + maybe(color, ansi_yellow, snapshot_path), + "\n", + ]) + False -> + string.concat([ + base_indent, + maybe(color, ansi_dim, " Snapshot: "), + maybe(color, ansi_cyan, snapshot_path), + "\n", + format_expected_actual(color, base_indent, expected, actual), + ]) + } +} + +fn format_summary(color: Bool, results: List(TestResult)) -> String { + let total = list.length(results) + let failed = count_by_status(results, Failed) + let skipped = count_by_status(results, Skipped) + let pending = count_by_status(results, Pending) + let timed_out = count_by_status(results, TimedOut) + let setup_failed = count_by_status(results, SetupFailed) + let passed = total - failed - skipped - pending - timed_out - setup_failed + let total_duration = sum_durations(results, 0) + + string.concat([ + maybe(color, fn(t) { ansi_bold(ansi_cyan(t)) }, "Summary: "), + maybe(color, 
ansi_cyan, int.to_string(total)), + " run, ", + maybe(color, ansi_red, int.to_string(failed)), + " failed, ", + maybe(color, ansi_green, int.to_string(passed)), + " passed", + build_summary_suffix(color, skipped, pending, timed_out, setup_failed), + " in ", + maybe(color, ansi_cyan, timing.format_duration_ms(total_duration)), + "\n", + ]) +} + +fn sum_durations(results: List(TestResult), total: Int) -> Int { + case results { + [] -> total + [result, ..rest] -> sum_durations(rest, total + result.duration_ms) + } +} + +fn count_by_status(results: List(TestResult), wanted: Status) -> Int { + count_matching_status(results, wanted, 0) +} + +fn count_matching_status( + results: List(TestResult), + wanted: Status, + count: Int, +) -> Int { + case results { + [] -> count + [result, ..rest] -> { + let next_count = increment_if_matches(result.status, wanted, count) + count_matching_status(rest, wanted, next_count) + } + } +} + +fn increment_if_matches(status: Status, wanted: Status, count: Int) -> Int { + case status == wanted { + True -> count + 1 + False -> count + } +} + +fn build_summary_suffix( + color: Bool, + skipped: Int, + pending: Int, + timed_out: Int, + setup_failed: Int, +) -> String { + let parts = + [] + |> add_summary_part_if_nonzero(color, skipped, " skipped", ansi_yellow) + |> add_summary_part_if_nonzero(color, pending, " pending", ansi_yellow) + |> add_summary_part_if_nonzero(color, timed_out, " timed out", ansi_red) + |> add_summary_part_if_nonzero( + color, + setup_failed, + " setup failed", + ansi_red, + ) + + format_summary_parts(parts) +} + +fn format_summary_parts(parts: List(String)) -> String { + case parts { + [] -> "" + _ -> string.concat([", ", string.join(parts, ", ")]) + } +} + +fn add_summary_part_if_nonzero( + parts: List(String), + color: Bool, + count: Int, + label: String, + paint: fn(String) -> String, +) -> List(String) { + case count { + 0 -> parts + _ -> [ + string.concat([paint(int.to_string(count)), maybe(color, ansi_dim, label)]), 
+ ..parts + ] + } +} diff --git a/src/dream_test/reporter/gherkin.gleam b/src/dream_test/reporters/gherkin.gleam similarity index 77% rename from src/dream_test/reporter/gherkin.gleam rename to src/dream_test/reporters/gherkin.gleam index f64e2fb..8143807 100644 --- a/src/dream_test/reporter/gherkin.gleam +++ b/src/dream_test/reporters/gherkin.gleam @@ -1,47 +1,14 @@ -//// Gherkin-style test reporter for dream_test. +//// Gherkin-style report formatting. //// -//// This reporter formats Gherkin test results in a Cucumber-like format -//// that mirrors the Given/When/Then structure of your scenarios. +//// This module formats `TestResult` values with kind `GherkinScenario(_ )` in a +//// Cucumber-like layout (Feature → Scenario), so you can print or persist a +//// human-readable report for BDD runs. //// -//// ## Example Output +//// Most users won’t call this module directly—`dream_test/reporters` wires it +//// in automatically—but it’s useful when you want: //// -//// ```text -//// Feature: Shopping Cart -//// Scenario: Adding items ✓ (2ms) -//// Scenario: Removing items ✗ (3ms) -//// ✗ equal -//// Message: Item count mismatch -//// Expected: 7 -//// Actual: 10 -//// -//// Summary: 2 run, 1 failed, 1 passed in 5ms -//// ``` -//// -//// ## Usage -//// -//// ```gleam -//// import dream_test/gherkin/feature -//// import dream_test/runner -//// import dream_test/reporter/gherkin as gherkin_reporter -//// import gleam/io -//// -//// pub fn main() { -//// my_feature_suite() -//// |> runner.run_suite() -//// |> gherkin_reporter.report(io.print) -//// } -//// ``` -//// -//// ## Status Markers -//// -//// | Status | Marker | Meaning | -//// |-------------|--------|--------------------------------| -//// | Passed | ✓ | All steps succeeded | -//// | Failed | ✗ | One or more steps failed | -//// | Skipped | - | Scenario was skipped | -//// | Pending | ~ | Scenario is a placeholder | -//// | TimedOut | ! 
| Scenario exceeded timeout | -//// | SetupFailed | ⚠ | A setup hook failed | +//// - a Gherkin report as a `String` (`format`) +//// - to write the report using a custom function (`report`) import dream_test/timing import dream_test/types.{ @@ -69,11 +36,20 @@ import gleam/string /// ## Example /// /// ```gleam -/// let report_string = format(results) -/// file.write("cucumber-results.txt", report_string) +/// gherkin_reporter.format(sample_results()) +/// |> should +/// |> match_snapshot("./test/snapshots/gherkin_format_report.snap") +/// |> or_fail_with("expected formatted report snapshot match") /// ``` /// -pub fn format(results: List(TestResult)) -> String { +/// ## Parameters +/// +/// - `results`: The test results to format +/// +/// ## Returns +/// +/// The formatted report as a `String`. +pub fn format(results results: List(TestResult)) -> String { let formatted_results = format_all_results(results, "", "") let summary_text = format_summary(results) string.concat([formatted_results, "\n", summary_text]) @@ -86,21 +62,22 @@ pub fn format(results: List(TestResult)) -> String { /// ## Example /// /// ```gleam -/// results |> report(io.print) +/// let results = runner.new([tests()]) |> runner.run() +/// gherkin_reporter.report(results, io.print) /// ``` /// /// ## Parameters /// -/// - `results` - List of test results from the runner -/// - `write` - Function that handles the formatted output string +/// - `results`: List of test results from the runner +/// - `write`: Function that handles the formatted output string /// /// ## Returns /// /// Returns the input results unchanged, enabling pipeline composition. 
/// pub fn report( - results: List(TestResult), - write: fn(String) -> Nil, + results results: List(TestResult), + write write: fn(String) -> Nil, ) -> List(TestResult) { write(format(results)) results @@ -113,11 +90,20 @@ pub fn report( /// ## Example /// /// ```gleam -/// let gherkin_results = list.filter(results, is_gherkin_result) -/// let unit_results = list.filter(results, fn(r) { !is_gherkin_result(r) }) +/// gherkin_reporter.is_gherkin_result(gherkin_result()) +/// |> should +/// |> be_equal(True) +/// |> or_fail_with("expected True for gherkin results") /// ``` /// -pub fn is_gherkin_result(result: TestResult) -> Bool { +/// ## Parameters +/// +/// - `result`: A single `TestResult` to inspect +/// +/// ## Returns +/// +/// `True` when `result.kind` is `GherkinScenario(_)`, otherwise `False`. +pub fn is_gherkin_result(result result: TestResult) -> Bool { case result.kind { GherkinScenario(_) -> True _ -> False @@ -324,10 +310,26 @@ fn add_summary_part_if_nonzero( } fn count_by_status(results: List(TestResult), wanted: Status) -> Int { - list.fold(results, 0, fn(count, result) { - case result.status == wanted { - True -> count + 1 - False -> count + count_matching_status(results, wanted, 0) +} + +fn count_matching_status( + results: List(TestResult), + wanted: Status, + count: Int, +) -> Int { + case results { + [] -> count + [result, ..rest] -> { + let next_count = increment_if_matches(result.status, wanted, count) + count_matching_status(rest, wanted, next_count) } - }) + } +} + +fn increment_if_matches(status: Status, wanted: Status, count: Int) -> Int { + case status == wanted { + True -> count + 1 + False -> count + } } diff --git a/src/dream_test/reporter/json.gleam b/src/dream_test/reporters/json.gleam similarity index 63% rename from src/dream_test/reporter/json.gleam rename to src/dream_test/reporters/json.gleam index 682eff1..3868be0 100644 --- a/src/dream_test/reporter/json.gleam +++ b/src/dream_test/reporters/json.gleam @@ -3,64 +3,47 @@ //// 
This reporter outputs test results as JSON for CI/CD integration, //// test aggregation, and tooling. //// -//// ## Example Output +//// The JSON object includes: //// -//// ```json -//// { -//// "version": "1.0", -//// "timestamp_ms": 1733151045123, -//// "duration_ms": 315, -//// "system": { -//// "os": "darwin", -//// "otp_version": "27", -//// "gleam_version": "0.67.0" -//// }, -//// "summary": { -//// "total": 3, -//// "passed": 2, -//// "failed": 1, -//// "skipped": 0, -//// "pending": 0, -//// "timed_out": 0, -//// "setup_failed": 0 -//// }, -//// "tests": [ -//// { -//// "name": "adds numbers", -//// "full_name": ["Calculator", "add", "adds numbers"], -//// "status": "passed", -//// "duration_ms": 2, -//// "tags": [], -//// "kind": "unit", -//// "failures": [] -//// } -//// ] -//// } -//// ``` +//// - `version`: schema version +//// - `timestamp_ms`: when the report was created +//// - `duration_ms`: total duration (sum of test durations) +//// - `system`: `os`, `otp_version`, `gleam_version` +//// - `summary`: counts by status +//// - `tests`: per-test details (name, full_name, status, duration_ms, tags, kind, failures) //// //// ## Usage //// //// ```gleam -//// import dream_test/reporter/json -//// import gleam/io +//// import dream_test/matchers.{succeed} +//// import dream_test/reporters/json +//// import dream_test/reporters/progress +//// import dream_test/runner +//// import dream_test/unit.{describe, it} //// -//// pub fn main() { -//// to_test_cases("my_test", tests()) -//// |> run_all() -//// |> json.report(io.print) -//// |> exit_on_failure() +//// pub fn tests() { +//// describe("JSON Reporter", [ +//// it("outputs JSON format", fn() { +//// // The json reporter prints machine-readable JSON at the end of the run. 
+//// Ok(succeed()) +//// }), +//// it("includes test metadata", fn() { +//// // JSON output includes name, full_name, status, duration, tags +//// Ok(succeed()) +//// }), +//// ]) //// } -//// ``` //// -//// ## Combining with BDD Reporter -//// -//// ```gleam -//// results -//// |> bdd.report(io.print) // Human-readable to stdout -//// |> json.report(write_to_file) // JSON to file -//// |> exit_on_failure() +//// pub fn main() { +//// runner.new([tests()]) +//// |> runner.progress_reporter(progress.new()) +//// |> runner.results_reporters([json.new()]) +//// |> runner.exit_on_failure() +//// |> runner.run() +//// } //// ``` +import dream_test/reporters/types as reporter_types import dream_test/types.{ type AssertionFailure, type FailurePayload, type Status, type TestKind, type TestResult, BooleanFailure, CollectionFailure, ComparisonFailure, @@ -77,18 +60,67 @@ import gleam/string // Public API // ============================================================================ +/// Create a JSON results reporter (printed at the end of the run). +pub fn new() -> reporter_types.ResultsReporter { + reporter_types.Json(reporter_types.JsonReporterConfig(pretty: False)) +} + +/// Enable pretty-printed JSON output. +pub fn pretty( + reporter reporter: reporter_types.ResultsReporter, +) -> reporter_types.ResultsReporter { + case reporter { + reporter_types.Json(_config) -> + reporter_types.Json(reporter_types.JsonReporterConfig(pretty: True)) + other -> other + } +} + +/// Render JSON output for a completed run. +pub fn render( + config config: reporter_types.JsonReporterConfig, + results results: List(TestResult), +) -> String { + let reporter_types.JsonReporterConfig(pretty: pretty) = config + case pretty { + True -> format_pretty(results) + False -> format(results) + } +} + /// Format test results as a compact JSON string. /// /// Returns a single-line JSON string suitable for machine parsing. 
/// +/// ## Parameters +/// +/// - `results`: The test results to encode into the JSON report +/// +/// ## Returns +/// +/// A compact (single-line) JSON string. +/// /// ## Example /// /// ```gleam -/// let json_string = format(results) -/// file.write("test-results.json", json_string) +/// import dream_test/matchers.{succeed} +/// import dream_test/reporters/json +/// import dream_test/runner +/// import dream_test/unit.{describe, it} +/// +/// fn example_suite() { +/// describe("Example Suite", [ +/// it("passes", fn() { Ok(succeed()) }), +/// ]) +/// } +/// +/// pub fn main() { +/// let results = runner.new([example_suite()]) |> runner.run() +/// json.format(results) +/// } /// ``` /// -pub fn format(results: List(TestResult)) -> String { +pub fn format(results results: List(TestResult)) -> String { build_report_json(results) |> json.to_string } @@ -97,14 +129,35 @@ pub fn format(results: List(TestResult)) -> String { /// /// Returns an indented, human-readable JSON string with 2-space indentation. /// +/// ## Parameters +/// +/// - `results`: The test results to encode into the JSON report +/// +/// ## Returns +/// +/// A pretty-printed JSON string with 2-space indentation. 
+/// /// ## Example /// /// ```gleam -/// let json_string = format_pretty(results) -/// io.println(json_string) +/// import dream_test/matchers.{succeed} +/// import dream_test/reporters/json +/// import dream_test/runner +/// import dream_test/unit.{describe, it} +/// +/// fn example_suite() { +/// describe("Example Suite", [ +/// it("passes", fn() { Ok(succeed()) }), +/// ]) +/// } +/// +/// pub fn main() { +/// let results = runner.new([example_suite()]) |> runner.run() +/// json.format_pretty(results) +/// } /// ``` /// -pub fn format_pretty(results: List(TestResult)) -> String { +pub fn format_pretty(results results: List(TestResult)) -> String { build_report_json(results) |> json.to_string |> prettify_json @@ -115,31 +168,38 @@ pub fn format_pretty(results: List(TestResult)) -> String { /// This is the main entry point for JSON reporting. The writer function /// receives the JSON string and can print it, log it, or write it to a file. /// -/// ## Example +/// ## Parameters /// -/// ```gleam -/// // Print to stdout -/// results |> json.report(io.print) -/// -/// // Write to file -/// results |> json.report(fn(s) { file.write("results.json", s) }) -/// ``` +/// - `results`: The test results to encode and write +/// - `write`: Output sink for the JSON string (for example `io.print`) /// /// ## Returns /// -/// Returns the input results unchanged, enabling pipeline composition: +/// Returns the input `results` unchanged, enabling pipeline composition. 
+/// +/// ## Example /// /// ```gleam -/// to_test_cases("my_test", tests()) -/// |> run_all() -/// |> bdd.report(io.print) -/// |> json.report(write_to_file) -/// |> exit_on_failure() -/// ``` +/// import dream_test/matchers.{succeed} +/// import dream_test/reporters/json +/// import dream_test/runner +/// import dream_test/unit.{describe, it} +/// import gleam/io /// +/// fn example_suite() { +/// describe("Example Suite", [ +/// it("passes", fn() { Ok(succeed()) }), +/// ]) +/// } +/// +/// pub fn main() { +/// let results = runner.new([example_suite()]) |> runner.run() +/// results |> json.report(io.print) +/// } +/// ``` pub fn report( - results: List(TestResult), - write: fn(String) -> Nil, + results results: List(TestResult), + write write: fn(String) -> Nil, ) -> List(TestResult) { write(format(results)) results @@ -149,15 +209,39 @@ pub fn report( /// /// Same as `report` but with indented, human-readable output. /// +/// ## Parameters +/// +/// - `results`: The test results to encode and write +/// - `write`: Output sink for the JSON string (for example `io.print`) +/// +/// ## Returns +/// +/// Returns the input `results` unchanged, enabling pipeline composition. 
+/// /// ## Example /// /// ```gleam -/// results |> json.report_pretty(io.print) +/// import dream_test/matchers.{succeed} +/// import dream_test/reporters/json +/// import dream_test/runner +/// import dream_test/unit.{describe, it} +/// import gleam/io +/// +/// fn example_suite() { +/// describe("Example Suite", [ +/// it("passes", fn() { Ok(succeed()) }), +/// ]) +/// } +/// +/// pub fn main() { +/// let results = runner.new([example_suite()]) |> runner.run() +/// results |> json.report_pretty(io.print) +/// } /// ``` /// pub fn report_pretty( - results: List(TestResult), - write: fn(String) -> Nil, + results results: List(TestResult), + write write: fn(String) -> Nil, ) -> List(TestResult) { write(format_pretty(results)) results @@ -303,16 +387,34 @@ fn kind_to_string(kind: TestKind) -> String { } fn count_by_status(results: List(TestResult), wanted: Status) -> Int { - list.fold(results, 0, fn(count, result) { - case result.status == wanted { - True -> count + 1 - False -> count - } - }) + count_by_status_loop(results, wanted, 0) +} + +fn count_by_status_loop( + results: List(TestResult), + wanted: Status, + count: Int, +) -> Int { + case results { + [] -> count + [result, ..rest] -> + case result.status == wanted { + True -> count_by_status_loop(rest, wanted, count + 1) + False -> count_by_status_loop(rest, wanted, count) + } + } } fn calculate_total_duration(results: List(TestResult)) -> Int { - list.fold(results, 0, fn(total, result) { total + result.duration_ms }) + calculate_total_duration_loop(results, 0) +} + +fn calculate_total_duration_loop(results: List(TestResult), total: Int) -> Int { + case results { + [] -> total + [result, ..rest] -> + calculate_total_duration_loop(rest, total + result.duration_ms) + } } // ============================================================================ diff --git a/src/dream_test/reporters/progress.gleam b/src/dream_test/reporters/progress.gleam new file mode 100644 index 0000000..5aeac52 --- /dev/null +++ 
b/src/dream_test/reporters/progress.gleam @@ -0,0 +1,243 @@ +//// Live progress bar reporter. +//// +//// This module renders a single-line progress bar that updates in-place using +//// carriage returns, and adapts to the current terminal width. +//// +//// Because it uses `\r` (carriage return) to rewrite the current line, it is +//// best suited for interactive terminals. In CI logs (or when other output is +//// printed concurrently), the output may be less readable. +//// +//// ## Terminal width +//// +//// Width is detected via Erlang `io:columns/0` with fallbacks: +//// +//// - if `io:columns/0` fails, it reads the `COLUMNS` environment variable +//// - if that is missing/invalid, it defaults to 80 columns +//// +//// It is designed to be driven by `dream_test/reporters/types.ReporterEvent`, +//// but most users should not call it directly. Prefer attaching it via +//// `runner.progress_reporter(progress.new())` and letting the runner drive events. +//// +//// ## Example +//// +//// ```gleam +//// pub fn main() { +//// runner.new([tests()]) +//// |> runner.progress_reporter(progress.new()) +//// |> runner.exit_on_failure() +//// |> runner.run() +//// } +//// ``` + +import dream_test/reporters/types as reporter_types +import dream_test/types.{type TestResult} +import gleam/int +import gleam/list +import gleam/option.{type Option, None, Some} +import gleam/string + +/// A progress-bar reporter that renders a single-line UI during a run. +/// +/// Construct one with `progress.new()` and attach it to the runner via +/// `runner.progress_reporter(...)`. +pub opaque type ProgressReporter { + ProgressReporter +} + +/// Create a new progress reporter. +pub fn new() -> ProgressReporter { + ProgressReporter +} + +/// Handle a single reporter event by writing an in-place progress bar line. +/// +/// - For `RunStarted`, prints an initial 0% bar. +/// - For `TestFinished`, prints an updated bar using the included counts. 
/// - For `RunFinished`, prints a final 100% bar and a newline.
///
/// This function ignores hook events (`HookStarted` / `HookFinished`) so hook
/// chatter doesn’t scramble the single-line UI.
///
/// ## Parameters
///
/// - `reporter`: the `ProgressReporter` handling the event (currently stateless)
/// - `event`: a `ReporterEvent` emitted by the runner
///
/// ## Returns
///
/// `Some(text)` for the caller to write — prefixed with `\r` so the line
/// redraws in place, with a trailing `\n` added for `RunFinished` — or
/// `None` for hook events, which produce no output.
///
/// ## When should I use this?
///
/// Usually you shouldn’t call it directly—prefer attaching it via
/// `runner.progress_reporter(progress.new())` and letting the runner drive events.
///
/// You may call it directly only if you are building your own reporter/driver
/// and you are already receiving `ReporterEvent`s.
///
/// ## Example
///
/// ```gleam
/// let line =
///   progress.handle_event(
///     progress.new(),
///     reporter_types.RunStarted(total: 10),
///   )
///   |> option.unwrap("")
///
/// line
/// |> should
/// |> match_snapshot("./test/snapshots/progress_handle_event_run_started.snap")
/// |> or_fail_with("expected handle_event output snapshot match")
/// ```
pub fn handle_event(
  reporter reporter: ProgressReporter,
  event event: reporter_types.ReporterEvent,
) -> Option(String) {
  // `ProgressReporter` carries no state yet; discard it explicitly so the
  // unused parameter is intentional rather than accidental.
  let _ = reporter
  let cols = terminal_columns()
  let line = render(cols, event)

  case event {
    // Hook chatter would scramble the in-place single-line UI.
    reporter_types.HookStarted(..) -> None
    reporter_types.HookFinished(..) -> None
    // Final update terminates the line so subsequent output starts fresh.
    reporter_types.RunFinished(..) -> Some("\r" <> line <> "\n")
    _ -> Some("\r" <> line)
  }
}

/// Render a progress bar line for a given terminal width.
///
/// This is pure and is intended for testing and for custom reporter work.
///
/// Width is measured in **graphemes** (user-visible characters), so Unicode
/// stays aligned. The width is clamped to a minimum of 20 so the bar remains
/// readable.
+/// +/// Note: The `columns` input is typically a *terminal column count* (from +/// `io:columns/0`), but rendering is done by grapheme count so we can safely +/// pad/truncate Unicode. +/// +/// ## Parameters +/// +/// - `columns`: terminal width (in columns) +/// - `event`: event to render +/// +/// ## Returns +/// +/// A single line of text **exactly** `columns` graphemes wide (after clamping). +/// +/// ## Example +/// +/// ```gleam +/// progress.render(30, reporter_types.RunStarted(total: 10)) +/// |> should +/// |> match_snapshot("./test/snapshots/progress_render_run_started.snap") +/// |> or_fail_with("expected render output snapshot match") +/// ``` +pub fn render( + columns columns: Int, + event event: reporter_types.ReporterEvent, +) -> String { + let cols = clamp_min(columns, 20) + case event { + reporter_types.RunStarted(total: total) -> render_line(cols, 0, total, "") + + reporter_types.TestFinished( + completed: completed, + total: total, + result: result, + ) -> render_line(cols, completed, total, format_result_name(result)) + + reporter_types.RunFinished( + completed: completed, + total: total, + results: _results, + ) -> render_line(cols, completed, total, "done") + + // Hook events do not affect the progress bar. + _ -> render_line(cols, 0, 1, "") + } +} + +fn render_line( + columns: Int, + completed: Int, + total: Int, + label: String, +) -> String { + let safe_total = case total <= 0 { + True -> 1 + False -> total + } + let safe_completed = clamp_range(completed, 0, safe_total) + let percent = safe_completed * 100 / safe_total + + let counter = + int.to_string(safe_completed) <> "/" <> int.to_string(safe_total) + let percent_text = int.to_string(percent) <> "%" + let prefix = counter <> " " + let suffix = case label { + "" -> " " <> percent_text + _ -> " " <> percent_text <> " " <> label + } + + // Layout: " []