diff --git a/CMakeLists.txt b/CMakeLists.txt
index 182763b..8137bea 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -24,11 +24,8 @@ project(async_context LANGUAGES CXX)
 # Library
 # ==============================================================================
 
-find_package(strong_ptr REQUIRED)
-
 add_library(async_context STATIC)
 target_compile_features(async_context PUBLIC cxx_std_23)
-target_link_libraries(async_context PUBLIC strong_ptr)
 target_sources(async_context PUBLIC
   FILE_SET CXX_MODULES
   TYPE CXX_MODULES
@@ -63,10 +60,18 @@ install(
   EXPORT_PACKAGE_DEPENDENCIES
 )
 
+# Always run this custom target by making it depend on ALL
+add_custom_target(copy_compile_commands ALL
+  COMMAND ${CMAKE_COMMAND} -E copy_if_different
+  ${CMAKE_BINARY_DIR}/compile_commands.json
+  ${CMAKE_SOURCE_DIR}/compile_commands.json
+  DEPENDS ${CMAKE_BINARY_DIR}/compile_commands.json)
+
 # ==============================================================================
 # Unit testing
 # ==============================================================================
 
+if(TRUE)
 if(CMAKE_CROSSCOMPILING)
   message(STATUS "Cross compiling, skipping unit test execution")
 else()
@@ -98,18 +103,13 @@ else()
   add_custom_target(run_tests ALL DEPENDS async_unit_test COMMAND
     async_unit_test)
 endif()
-
-# Always run this custom target by making it depend on ALL
-add_custom_target(copy_compile_commands ALL
-  COMMAND ${CMAKE_COMMAND} -E copy_if_different
-  ${CMAKE_BINARY_DIR}/compile_commands.json
-  ${CMAKE_SOURCE_DIR}/compile_commands.json
-  DEPENDS ${CMAKE_BINARY_DIR}/compile_commands.json)
+endif()
 
 # ==============================================================================
 # Benchmarking
 # ==============================================================================
 
+if(FALSE)
 if(CMAKE_CROSSCOMPILING)
   message(STATUS "Cross compiling, skipping benchmarks")
 else()
@@ -135,3 +135,4 @@ else()
   add_custom_target(run_benchmark ALL DEPENDS async_benchmark COMMAND
     async_benchmark)
 endif()
+endif()
diff --git a/benchmarks/benchmark.cpp b/benchmarks/benchmark.cpp
index 1cf75f6..3f4103f 100644
--- a/benchmarks/benchmark.cpp
+++ b/benchmarks/benchmark.cpp
@@ -34,9 +34,6 @@ import async_context;
 // Quick Bench: https://quick-bench.com/
 // Compiler flags: -std=c++23 -O3 -DNDEBUG
-//
-// Include your de-moduled async_context code above this section
-// ============================================================================
 
 // ============================================================================
 // BENCHMARKS
@@ -251,63 +248,25 @@ __attribute__((noinline)) async::future<int> sync_future_level1(
   auto f = sync_future_level2(ctx, x);
   return f.sync_wait() + 1;
 }
-struct test_scheduler
-  : public async::scheduler
-  , mem::enable_strong_from_this<test_scheduler>
+struct benchmark_context : public async::context
 {
-  int sleep_count = 0;
-  async::context* sync_context = nullptr;
-  bool io_block = false;
+  std::array<async::uptr, 512> m_stack{};
 
-  test_scheduler(mem::strong_ptr_only_token)
+  benchmark_context()
   {
+    this->initialize_stack_memory(m_stack);
   }
 
 private:
-  void do_schedule([[maybe_unused]] async::context& p_context,
-                   [[maybe_unused]] async::blocked_by p_block_state,
-                   [[maybe_unused]] async::scheduler::block_info
-                     p_block_info) noexcept override
+  void do_schedule(async::blocked_by, async::block_info) noexcept override
   {
-    switch (p_block_state) {
-      case async::blocked_by::time: {
-        if (std::holds_alternative<async::sleep_duration>(p_block_info)) {
-          sleep_count++;
-        }
-        break;
-      }
-      case async::blocked_by::sync: {
-        if (std::holds_alternative<async::context*>(p_block_info)) {
-          auto* context = std::get<async::context*>(p_block_info);
-          sync_context = context;
-        }
-        break;
-      }
-      case async::blocked_by::io: {
-        io_block = true;
-        break;
-      }
-      case async::blocked_by::nothing: {
-        break;
-      }
-      default: {
-        break;
-      }
-    }
-  }
-
-  std::pmr::memory_resource& do_get_allocator() noexcept override
-  {
-    return *strong_from_this().get_allocator();
+    // Do nothing for the benchmark
   }
 };
 
-auto scheduler =
-  mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-
 static void bm_future_sync_return(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
 
   for (auto _ : state) {
@@ -344,7 +303,7 @@ __attribute__((noinline)) async::future<int> coro_level1(async::context& ctx,
 
 static void bm_future_coroutine(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
 
   for (auto _ : state) {
@@ -385,7 +344,7 @@ __attribute__((noinline)) async::future<int> sync_in_coro_level1(
 
 static void bm_future_sync_await(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
 
   for (auto _ : state) {
@@ -425,7 +384,7 @@ __attribute__((noinline)) async::future<int> mixed_coro_level1(
 
 static void bm_future_mixed(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
 
   for (auto _ : state) {
@@ -466,7 +425,7 @@ void_coro_level1(async::context& ctx, int& out, int x)
 
 static void bm_future_void_coroutine(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
   int output = 0;
 
@@ -481,7 +440,7 @@ BENCHMARK(bm_future_void_coroutine);
 
 static void bm_future_void_coroutine_context_resume(benchmark::State& state)
 {
-  async::context ctx(scheduler, 4096);
+  benchmark_context ctx;
   int input = 42;
   int output = 0;
 
diff --git a/conanfile.py b/conanfile.py
index 31c8a2a..9f7b777 100644
--- a/conanfile.py
+++ b/conanfile.py
@@ -91,7 +91,7 @@ def build_requirements(self):
         self.test_requires("benchmark/1.9.4")
 
     def requirements(self):
-        self.requires("strong_ptr/0.1.2")
+        pass
 
     def layout(self):
         cmake_layout(self)
diff --git a/modules/async_context.cppm b/modules/async_context.cppm
index 3409f1d..42ae6e2 100644
--- a/modules/async_context.cppm
+++ b/modules/async_context.cppm
@@ -36,8 +36,6 @@ module;
 
 export module async_context;
 
-export import strong_ptr;
-
 namespace async::inline v0 {
 
 export using u8 = std::uint8_t;
@@ -126,88 +124,12 @@ export class operation_cancelled : public std::exception
  */
 using sleep_duration = std::chrono::nanoseconds;
 
-// TODO(#39): Merge scheduler into context
 /**
- * @brief
+ * @brief Information about the block state when context::schedule is called
  *
  */
-export class scheduler
-{
-public:
-  using block_info = std::variant<std::monostate, sleep_duration, context*>;
-
-  /**
-   * @brief
-   *
-   * It is up to the scheduler to ensure that concurrent calls to this API are
-   * serialized appropriately. For a single threaded event loop, syncronization
-   * and serialization is not necessary. For a thread pool implementation,
-   * syncronization nd serialization must be considered.
-   *
-   * @param p_context - the context that is requested to be scheduled
-   * @param p_block_state - the type of blocking event the context has
-   * encountered.
-   * @param p_block_info - Information about what exactly is blocking this
-   * context. If p_block_info is a sleep_duration, and the p_block_state is
-   * blocked_by::time, then this context is requesting to be scheduled at that
-   * or a later time. If the p_block_info is a sleep_duration, and the block
-   * state isn't blocked_by::time, then this sleep duration is a hint to the
-   * scheduler to when it would be appropriate to reschedule this context. The
-   * scheduler does not have to be abided by this. If p_block_info is a pointer
-   * to a context, then the pointed to context is currently blocking p_context.
-   * This can be used to determine when to schedule p_context again, but does
-   * not have to be abided by for proper function.
-   */
-  void schedule(context& p_context,
-                blocked_by p_block_state,
-                block_info p_block_info) noexcept
-  {
-    return do_schedule(p_context, p_block_state, p_block_info);
-  }
-
-  /**
-   * @brief Get allocator from scheduler
-   *
-   * The memory_resource returned be owned or embedded within the scheduler. The
-   * memory_resource and its backing memory must live as long as the scheduler.
-   * The returned reference MUST NOT be bound to a nullptr.
-   *
-   * @return std::pmr::memory_resource& - the memory resource to be used to
-   * allocate memory for async::context stack memory. The memory_resource must
-   * be owned or embedded within the scheduler.
-   */
-  std::pmr::memory_resource& get_allocator() noexcept
-  {
-    return do_get_allocator();
-  }
-
-private:
-  virtual void do_schedule(context& p_context,
-                           blocked_by p_block_state,
-                           block_info p_block_info) noexcept = 0;
-
-  virtual std::pmr::memory_resource& do_get_allocator() noexcept = 0;
-};
-
-export constexpr mem::strong_ptr<scheduler> noop_scheduler()
-{
-  struct noop_scheduler : scheduler
-  {
-    void do_schedule(context&, blocked_by, block_info) noexcept override
-    {
-      return;
-    }
-
-    std::pmr::memory_resource& do_get_allocator() noexcept override
-    {
-      std::terminate();
-    }
-  };
-
-  static noop_scheduler sched;
-
-  return mem::strong_ptr<scheduler>(mem::unsafe_assume_static_tag{}, sched);
-}
+export using block_info =
+  std::variant<std::monostate, sleep_duration, context*>;
 
 class promise_base;
 
 export class context
 {
 public:
   static auto constexpr default_timeout = sleep_duration(0);
-  using scheduler_t = mem::strong_ptr<scheduler>;
 
-  // with something thats easier and safer to work with.
+  context() = default;
+  context(context const&) = delete;
+  context& operator=(context const&) = delete;
+  context(context&&) = delete;
+  context& operator=(context&&) = delete;
+
   /**
-   * @brief Construct a new context object
+   * @brief Implementations of context must call this API in their constructor
+   * in order to initialize the stack memory of this context.
    *
-   * @param p_scheduler - a pointer to a transition handler that
-   * handles transitions in blocked_by state.
-   * @param p_stack_size - Number of bytes to allocate for the context's stack
-   * memory.
+   * @param p_stack_memory - stack memory provided by the derived context. It is
+   * the responsibility of the derived context to manage this memory. If this
+   * memory was dynamically allocated, then it is the responsibility of the
+   * derived class to deallocate that memory.
+   */
-  context(scheduler_t const& p_scheduler, usize p_stack_size)
-    : m_proxy(p_scheduler)
+  constexpr void initialize_stack_memory(std::span<uptr> p_stack_memory)
   {
-    using poly_allocator = std::pmr::polymorphic_allocator<std::byte>;
-    auto allocator = poly_allocator(&p_scheduler->get_allocator());
-
-    // Allocate memory for stack and assign to m_stack
-    auto const words_to_allocate = 1uz + ((p_stack_size + mask) >> shift);
-    m_stack = { allocator.allocate_object<uptr>(words_to_allocate),
-                words_to_allocate };
+    m_stack = p_stack_memory;
     m_stack_pointer = m_stack.data();
   }
 
@@ -328,48 +248,8 @@ public:
     return capacity() - memory_used();
   }
 
-  [[nodiscard]] constexpr bool is_proxy() const noexcept
-  {
-    return std::holds_alternative<proxy_info>(m_proxy);
-  }
-
-  /**
-   * @brief Prevent a temporary context from being borrowed
-   *
-   * Required to prevent proxies with dangling references to a context
-   *
-   * @return context - (never returns generates compile time error)
-   */
-  context borrow_proxy() && = delete;
-
-  /**
-   * @brief
-   *
-   * @return context
-   */
-  context borrow_proxy() &
-  {
-    return { proxy_tag{}, *this };
-  }
-
-  ~context()
-  {
-    // We need to destroy the entire coroutine chain here!
-    // TODO(#40): Perform cancellation on context destruction
-    // unsafe_cancel();
-
-    if (is_proxy()) {
-      auto* parent = std::get<proxy_info>(m_proxy).parent;
-      // Unshrink parent stack, by setting its range to be the start of its
-      // stack and the end to be the end of this stack.
-      parent->m_stack = std::span(parent->m_stack.begin(), m_stack.end());
-    } else {
-      using poly_allocator = std::pmr::polymorphic_allocator<std::byte>;
-      auto scheduler = std::get<scheduler_t>(m_proxy);
-      auto allocator = poly_allocator(&scheduler->get_allocator());
-      allocator.deallocate_object(m_stack.data(), m_stack.size());
-    }
-  };
+  // TODO(#40): Perform cancellation on context destruction
+  virtual ~context() = default;
 
 private:
   friend class promise_base;
 
   struct proxy_info
   {
-    context* origin = nullptr;
+    context* original = nullptr;
     context* parent = nullptr;
   };
 
-  struct proxy_tag
-  {};
-
-  context(proxy_tag, context& p_parent)
-    : m_active_handle(std::noop_coroutine())
-    , m_proxy(proxy_info{})
-  {
-    // We need to manually set:
-    // 1. m_stack
-    // 2. m_stack_pointer
-    // 3. m_proxy
-
-    // Our proxy will take control over the rest of the unused stack memory from
-    // the above context.
-    m_stack =
-      p_parent.m_stack.last(p_parent.m_stack_pointer - p_parent.m_stack.data());
-    m_stack_pointer = m_stack.data();
-
-    // Shrink the stack of the parent context to be equal to the current stack
-    // index. This will prevent the parent context from being used again.
-    p_parent.m_stack = std::span(p_parent.m_stack.data(), m_stack_pointer);
-
-    // If this is a proxy, take its pointer to the origin
-    if (p_parent.is_proxy()) {
-      auto info = std::get<proxy_info>(p_parent.m_proxy);
-      m_proxy = proxy_info{
-        .origin = info.origin,
-        .parent = &p_parent,
-      };
-    } else {  // Otherwise, the current parent is the origin.
-      m_proxy = proxy_info{
-        .origin = &p_parent,
-        .parent = &p_parent,
-      };
-    }
-  }
-
-  [[nodiscard]] constexpr context* origin() noexcept
-  {
-    if (is_proxy()) {
-      return std::get<proxy_info>(m_proxy).origin;
-    }
-    return this;
-  }
-
-  [[nodiscard]] constexpr context const* origin() const noexcept
+  [[nodiscard]] constexpr bool is_proxy() const noexcept
   {
-    if (is_proxy()) {
-      return std::get<proxy_info>(m_proxy).origin;
-    }
-    return this;
+    return m_proxy.parent != nullptr;
   }
 
-  constexpr void transition_to(
-    blocked_by p_new_state,
-    scheduler::block_info p_info = std::monostate{}) noexcept
+  constexpr void transition_to(blocked_by p_new_state,
+                               block_info p_info = std::monostate{}) noexcept
   {
-    auto* origin_ptr = origin();
-    origin_ptr->m_state = p_new_state;
-    std::get<scheduler_t>(origin_ptr->m_proxy)
-      ->schedule(*origin_ptr, p_new_state, p_info);
+    m_state = p_new_state;
+    schedule(p_new_state, p_info);
   }
 
   [[nodiscard]] constexpr void* allocate(std::size_t p_bytes)
   {
-    // The extra 1 word is for the stack pointer's address
     size_t const words_to_allocate = 1uz + ((p_bytes + mask) >> shift);
     auto const new_stack_index = m_stack_pointer + words_to_allocate;
 
@@ -475,20 +303,128 @@ private:
     return coroutine_frame_stack_address;
   }
 
-  using proxy_state = std::variant<proxy_info, mem::strong_ptr<scheduler>>;
+  /**
+   * @brief Wrapper around call to do_schedule
+   *
+   * This wrapper exists to allow future extensibility
+   *
+   * @param p_block_state - state that this context has been set to
+   * @param p_block_info - information about the blocking conditions
+   */
+  void schedule(blocked_by p_block_state, block_info p_block_info) noexcept
+  {
+    return do_schedule(p_block_state, p_block_info);
+  }
 
-  // Should stay close to a standard cache-line of 64 bytes (8 words).
-  // Unfortunately we cannot achieve that if we want proxy support, so we must
-  // deal with that by putting the scheduler towards the end since it is the
-  // least hot part of the data.
-  std::coroutine_handle<> m_active_handle = std::noop_coroutine();  // word 1
-  std::span<uptr> m_stack{};                                        // word 2-3
-  uptr* m_stack_pointer = nullptr;                                  // word 4
-  blocked_by m_state = blocked_by::nothing;                         // word 5
-  proxy_state m_proxy{ proxy_info{} };                              // word 6-8
+  /**
+   * @brief Implementations of context use this to notify their scheduler of
+   * changes to this async context.
+   *
+   * It is up to the scheduler to ensure that concurrent calls to this API are
+   * serialized appropriately. For a single threaded event loop,
+   * synchronization and serialization is not necessary. For a thread pool
+   * implementation, synchronization and serialization must be considered.
+   *
+   * @param p_block_state - the type of blocking event the context has
+   * encountered.
+   * @param p_block_info - Information about what exactly is blocking this
+   * context. If p_block_info is a sleep_duration, and the p_block_state is
+   * blocked_by::time, then this context is requesting to be scheduled at that
+   * or a later time. If the p_block_info is a sleep_duration, and the block
+   * state isn't blocked_by::time, then this sleep duration is a hint to the
+   * scheduler for when it would be appropriate to reschedule this context.
+   * The scheduler does not have to abide by this. If p_block_info is a
+   * pointer to a context, then the pointed to context is currently blocking
+   * this context. This can be used to determine when to schedule this context
+   * again, but does not have to be abided by for proper function.
+ */ + virtual void do_schedule(blocked_by p_block_state, + block_info p_block_info) noexcept = 0; + friend class proxy_context; + + /* vtable ptr */ // word 1 + std::coroutine_handle<> m_active_handle = std::noop_coroutine(); // word 2 + std::span m_stack{}; // word 3-4 + uptr* m_stack_pointer = nullptr; // word 5 + blocked_by m_state = blocked_by::nothing; // word 6 + proxy_info m_proxy{}; // word 7-8 }; -static_assert(sizeof(context) <= std::hardware_constructive_interference_size); +// Context should stay close to a standard cache-line of 64 bytes (8 words) for +// a 64-bit system. This compile time check ensures that the context does not +// exceed the this boundary for the platform. +static_assert(sizeof(context) <= std::hardware_constructive_interference_size, + "Context cannot be contained within a cache-line (as specified " + "by std::hardware_constructive_interference_size)"); + +export class proxy_context : public context +{ +public: + proxy_context(proxy_context const&) = delete; + proxy_context& operator=(proxy_context const&) = delete; + proxy_context(proxy_context&&) = delete; + proxy_context& operator=(proxy_context&&) = delete; + + static proxy_context from(context& p_parent) + { + return { p_parent }; + } + + ~proxy_context() override + { + // Unshrink parent stack, by setting its range to be the start of its + // stack and the end to be the end of this stack. + m_proxy.parent->m_stack = { m_proxy.parent->m_stack.begin(), + m_stack.end() }; + } + +private: + proxy_context(context& p_parent) + { + m_active_handle = std::noop_coroutine(); + m_proxy = {}; + + // We need to manually set: + // 1. m_stack + // 2. m_stack_pointer + // 3. m_proxy + + // Our proxy will take control over the rest of the unused stack memory from + // the above context. + auto remaining_words = p_parent.m_stack_pointer - p_parent.m_stack.data(); + m_stack = p_parent.m_stack.last(remaining_words); + m_stack_pointer = m_stack.data(); + + // Shrink the parent's stack to its current stack pointer, preventing it + // from allocating again. + p_parent.m_stack = { p_parent.m_stack.data(), p_parent.m_stack_pointer }; + + // If this is a proxy, take its pointer to the origin + if (p_parent.is_proxy()) { + m_proxy = proxy_info{ + .original = m_proxy.original, + .parent = &p_parent, + }; + } else { // Otherwise, the current parent is the origin. 
+      m_proxy = proxy_info{
+        .original = &p_parent,
+        .parent = &p_parent,
+      };
+    }
+  }
+
+  /**
+   * @brief Forwards the schedule call to the original context
+   *
+   * @param p_block_state - state that this context has been set to
+   * @param p_block_info - information about the blocking conditions
+   */
+  void do_schedule(blocked_by p_block_state,
+                   block_info p_block_info) noexcept override
+  {
+    m_proxy.original->schedule(p_block_state, p_block_info);
+  }
+};
 
 export class context_token
 {
diff --git a/test_package/main.cpp b/test_package/main.cpp
index 2967b50..b3267e4 100644
--- a/test_package/main.cpp
+++ b/test_package/main.cpp
@@ -22,31 +22,23 @@
 
 import async_context;
 
-struct test_scheduler
-  : public async::scheduler
-  , mem::enable_strong_from_this<test_scheduler>
+struct test_context : public async::context
 {
+  std::array<async::uptr, 128> m_stack{};
   int sleep_count = 0;
-
-  test_scheduler(mem::strong_ptr_only_token)
+
+  test_context()
   {
+    this->initialize_stack_memory(m_stack);
   }
 
 private:
-  void do_schedule([[maybe_unused]] async::context& p_context,
-                   [[maybe_unused]] async::blocked_by p_block_state,
-                   [[maybe_unused]] async::scheduler::block_info
-                     p_block_info) noexcept override
+  void do_schedule(async::blocked_by p_blocked_state,
+                   async::block_info) noexcept override
   {
-    if (std::holds_alternative<async::sleep_duration>(p_block_info)) {
+    if (p_blocked_state == async::blocked_by::time) {
       sleep_count++;
     }
   }
-
-  std::pmr::memory_resource& do_get_allocator() noexcept override
-  {
-    return *strong_from_this().get_allocator();
-  }
 };
 
 async::future<void> coro_double_delay(async::context&)
 {
@@ -62,21 +54,19 @@
 int main()
 {
-  auto scheduler =
-    mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-  async::context my_context(scheduler, 1024);
+  test_context ctx;
 
-  auto future_delay = coro_double_delay(my_context);
+  auto future_delay = coro_double_delay(ctx);
 
   assert(not future_delay.done());
 
   future_delay.resume();
 
-  assert(scheduler->sleep_count == 1);
+  assert(ctx.sleep_count == 1);
 
   future_delay.resume();
 
-  assert(scheduler->sleep_count == 2);
+  assert(ctx.sleep_count == 2);
 
   assert(not future_delay.done());
 
   future_delay.resume();
diff --git a/tests/async.test.cpp b/tests/async.test.cpp
index 824f833..6a16a0f 100644
--- a/tests/async.test.cpp
+++ b/tests/async.test.cpp
@@ -46,33 +46,43 @@ std::ostream& operator<<(std::ostream& out, blocked_by b)
 }  // namespace async
 
 bool resumption_occurred = false;
-struct test_scheduler
-  : public async::scheduler
-  , mem::enable_strong_from_this<test_scheduler>
+
+struct thread_info
 {
-  int sleep_count = 0;
   async::context* sync_context = nullptr;
+  int sleep_count = 0;
   bool io_block = false;
+};
 
-  test_scheduler(mem::strong_ptr_only_token)
+struct test_context : public async::context
+{
+  std::shared_ptr<thread_info> info;
+  std::array<async::uptr, 1024> m_stack{};
+
+  test_context(std::shared_ptr<thread_info> const& p_info)
+    : info(p_info)
+  {
+    this->initialize_stack_memory(m_stack);
+  }
+
+  test_context()
+    : info(std::make_shared<thread_info>())
   {
+    this->initialize_stack_memory(m_stack);
   }
 
 private:
-  void do_schedule([[maybe_unused]] async::context& p_context,
-                   [[maybe_unused]] async::blocked_by p_block_state,
-                   [[maybe_unused]] async::scheduler::block_info
-                     p_block_info) noexcept override
+  void do_schedule(async::blocked_by p_block_state,
+                   async::block_info p_block_info) noexcept override
   {
-    std::println("Scheduler called!", sleep_count);
+    std::println("Scheduler called!", info->sleep_count);
     switch (p_block_state) {
       case async::blocked_by::time: {
        if (std::holds_alternative<async::sleep_duration>(p_block_info)) {
          std::println("sleep for: {}",
                       std::get<async::sleep_duration>(p_block_info));
-          sleep_count++;
-          std::println("Sleep count = {}!", sleep_count);
+          info->sleep_count++;
+          std::println("Sleep count = {}!", info->sleep_count);
        }
        break;
      }
@@ -81,19 +91,19 @@ struct test_scheduler
        auto* context = std::get<async::context*>(p_block_info);
        std::println(
          "Coroutine ({}) is blocked by syncronization with coroutine ({})",
-          static_cast<void*>(&p_context),
+          static_cast<void*>(this),
          static_cast<void*>(context));
-        sync_context = context;
+        info->sync_context = context;
      }
      break;
    }
    case async::blocked_by::io: {
-      io_block = true;
+      info->io_block = true;
      break;
    }
    case async::blocked_by::nothing: {
      std::println("Context ({}) has been unblocked!",
-                   static_cast<void*>(&p_context));
+                   static_cast<void*>(this));
      break;
    }
    default: {
@@ -101,11 +111,6 @@ struct test_scheduler
      }
    }
  }
-
-  std::pmr::memory_resource& do_get_allocator() noexcept override
-  {
-    return *strong_from_this().get_allocator();
-  }
 };
 
 namespace async {
@@ -115,9 +120,7 @@ void async_context_suite()
 
   "coroutine with time-based blocking and sync_wait"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx(scheduler, 8192);
+    test_context ctx;
 
     static constexpr int expected_return_value = 5;
 
@@ -139,16 +142,15 @@ void async_context_suite()
     expect(that % resumption_occurred);
     expect(that % future_print.done());
     expect(that % 0 == ctx.memory_used());
-    expect(that % 2 == scheduler->sleep_count);
+    expect(that % 2 == ctx.info->sleep_count);
     expect(that % expected_return_value == value);
   };
 
   "block_by_io and block_by_sync notify scheduler correctly"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx1(scheduler, 8192);
-    async::context ctx2(scheduler, 8192);
+    auto info = std::make_shared<thread_info>();
+    test_context ctx1(info);
+    test_context ctx2(info);
 
     resumption_occurred = false;
 
@@ -162,28 +164,30 @@
       co_return;
     };
 
-    // Exercise
+    // Exercise 1
     auto blocked_by_testing = test_coro(ctx1);
+
+    // Verify 1
     expect(that % not resumption_occurred);
     expect(that % 0 < ctx1.memory_used());
     expect(that % 0 == ctx2.memory_used());
+
+    // Exercise 2
     blocked_by_testing.sync_wait();
 
-    // Verify
+    // Verify 2
     expect(that % resumption_occurred);
     expect(that % blocked_by_testing.done());
-    expect(that % scheduler->io_block);
-    expect(that % &ctx2 == scheduler->sync_context);
+    expect(that % info->io_block);
+    expect(that % &ctx2 == info->sync_context);
     expect(that % 0 == ctx1.memory_used());
     expect(that % 0 == ctx2.memory_used());
   };
 
   "Context Token"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx1(scheduler, 8192);
-    async::context ctx2(scheduler, 8192);
+    test_context ctx1;
+    test_context ctx2;
 
     async::context_token io_in_use;
 
@@ -229,9 +233,8 @@
       [&](async::blocked_by p_state = async::blocked_by::io,
          std::source_location const& p_location =
            std::source_location::current()) {
-        expect(that % static_cast<int>(p_state) ==
-               static_cast<int>(ctx1.state()))
-          << "line: " << p_location.line() << '\n';
+        expect(that % p_state == ctx1.state())
+          << "ctx1 state mismatch, line: " << p_location.line() << '\n';
      };
 
     auto check_access_second_blocked_by =
@@ -239,7 +242,7 @@
       [&](async::blocked_by p_state = async::blocked_by::nothing,
          std::source_location const& p_location =
            std::source_location::current()) {
        expect(that % p_state == ctx2.state())
-          << "line: " << p_location.line() << '\n';
+          << "ctx2 state mismatch, line: " << p_location.line() << '\n';
      };
     // access_first will claim the resource and will return control, and be
@@ -300,33 +303,31 @@ void async_context_suite()
     expect(that % 0 == ctx2.memory_used());
   };
 
+  struct raii_counter
+  {
+    raii_counter(std::pair<int*, int*> p_counts)
+      : counts(p_counts)
+    {
+      std::println("🔨 Constructing...");
+      (*counts.first)++;
+    }
+
+    ~raii_counter()  // NOLINT(bugprone-exception-escape)
+    {
+      std::println("💥 Destructing...");
+      (*counts.second)++;
+    }
+    std::pair<int*, int*> counts;
+  };
+
   "Cancellation"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx(scheduler, 8192);
+    test_context ctx;
 
     std::println("====================================");
     std::println("Running cancellation test");
     std::println("====================================");
 
-    struct raii_counter
-    {
-      raii_counter(std::pair<int*, int*> p_counts)
-        : counts(p_counts)
-      {
-        std::println("🔨 Constructing...");
-        (*counts.first)++;
-      }
-
-      ~raii_counter()  // NOLINT(bugprone-exception-escape)
-      {
-        std::println("💥 Destructing...");
-        (*counts.second)++;
-      }
-      std::pair<int*, int*> counts;
-    };
-
     std::pair<int, int> count{ 0, 0 };
     int ends_reached = 0;
 
@@ -344,6 +345,7 @@
       ends_reached++;
       co_return;
     };
+
     auto b = [a, get_counter, &ends_reached](async::context& p_ctx) -> future<void> {
       std::println("entering b");
@@ -353,6 +355,7 @@
       ends_reached++;
       co_return;
    };
+
    auto c = [b, get_counter, &ends_reached](async::context& p_ctx) -> future<void> {
      std::println("entering c");
@@ -393,31 +396,12 @@
 
   "Context Cancellation"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx(scheduler, 8192);
+    test_context ctx;
 
     std::println("====================================");
     std::println("Running Context Cancellation");
     std::println("====================================");
 
-    struct raii_counter
-    {
-      raii_counter(std::pair<int*, int*> p_counts)
-        : counts(p_counts)
-      {
-        std::println("🔨 Constructing...");
-        (*counts.first)++;
-      }
-
-      ~raii_counter()  // NOLINT(bugprone-exception-escape)
-      {
-        std::println("💥 Destructing...");
-        (*counts.second)++;
-      }
-      std::pair<int*, int*> counts;
-    };
-
     std::pair<int, int> count{ 0, 0 };
     int ends_reached = 0;
 
@@ -487,9 +471,7 @@
 
   "Exception Propagation"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx(scheduler, 8192);
+    test_context ctx;
 
     std::println("====================================");
     std::println("Running Exception Propagation Test");
     std::println("====================================");
@@ -576,34 +558,33 @@ void async_context_suite()
     expect(that % 0 == ctx.memory_used());
   };
 
-  "Proxy Context (no timeout normal behavior)"_test = []() {
+  "Proxy Context (normal behavior, no timeout)"_test = []() {
     // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx(scheduler, 8192);
+    test_context ctx;
 
     std::println("====================================");
     std::println("Running Proxy Context Test (no timeout normal behavior)");
     std::println("====================================");
 
     static constexpr auto expected_suspensions = 5;
+    static constexpr auto timeout_count = expected_suspensions + 2;
+    auto suspension_count = 0;
 
-    auto b = [](async::context&, int p_suspend_count) -> future<int> {
-      auto result = p_suspend_count;
-      while (result > 0) {
-        result--;
+    auto b = [&suspension_count](async::context&) -> future<int> {
+      while (suspension_count < expected_suspensions) {
+        suspension_count++;
        // For some reason this segfaults on Linux
-        // std::println("count = {}!", result);
+        // std::println("p_suspend_count = {}!", suspension_count);
        co_await std::suspend_always{};
      }
-      co_return p_suspend_count;
+      co_return expected_suspensions;
    };
 
    auto a = [b](async::context& p_ctx) -> future<int> {
      std::println("Entered coroutine a!");
-      auto proxy = p_ctx.borrow_proxy();
+      auto proxy = async::proxy_context::from(p_ctx);
      std::println("Made a proxy!");
-      int counter = expected_suspensions + 2;
-      auto supervised_future = b(proxy, expected_suspensions);
+      int counter = timeout_count;
+      auto supervised_future = b(proxy);
 
      while (not supervised_future.done()) {
        std::println("supervised_future not done()!");
@@ -637,37 +618,37 @@
    expect(that % my_future.done());
    expect(that % expected_suspensions == value);
    expect(that % 0 == ctx.memory_used());
+    expect(that % suspension_count == expected_suspensions);
  };
 
  "Proxy Coroutines Timeout"_test = []() {
    // Setup
-    auto scheduler =
-      mem::make_strong_ptr<test_scheduler>(std::pmr::new_delete_resource());
-    async::context ctx1(scheduler, 8192);
+    test_context ctx;
 
    std::println("====================================");
    std::println("Running Proxy Context Test (with timeout)");
    std::println("====================================");
 
    static constexpr auto expected_suspensions = 5;
+    static constexpr auto timeout_count = expected_suspensions - 2;
+    auto suspension_count = 0;
 
-    [[maybe_unused]] auto b = [](async::context&,
-                                 int p_suspend_count) -> future<int> {
-      auto const result = p_suspend_count;
-      while (p_suspend_count > 0) {
-        p_suspend_count--;
+    auto b = [&suspension_count](async::context&) -> future<int> {
+      suspension_count = 0;
+      while (suspension_count < expected_suspensions) {
+        suspension_count++;
        // For some reason this segfaults on Linux
-        // std::println("p_suspend_count = {}!", p_suspend_count);
+        // std::println("p_suspend_count = {}!", suspension_count);
        co_await std::suspend_always{};
      }
-      co_return result;
+      co_return expected_suspensions;
    };
 
    auto a = [b](async::context& p_ctx) -> future<int> {
      std::println("Entered coroutine a!");
-      auto proxy = p_ctx.borrow_proxy();
+      auto proxy = async::proxy_context::from(p_ctx);
      std::println("Made a proxy!");
-      int counter = expected_suspensions - 2;
-      auto supervised_future = b(proxy, expected_suspensions);
+      int counter = timeout_count;
+      auto supervised_future = b(proxy);
 
      while (not supervised_future.done()) {
        std::println("supervised_future not done()!");
@@ -695,12 +676,17 @@
      co_return -1;
    };
 
-    auto my_future = a(ctx1);
+    auto my_future = a(ctx);
    auto value = my_future.sync_wait();
+    auto value2 = a(ctx).sync_wait();
 
    expect(that % my_future.done());
    expect(that % -1 == value);
-    expect(that % 0 == ctx1.memory_used());
+    expect(that % -1 == value2);
+    expect(that % suspension_count == timeout_count);
+    expect(that % 0 == ctx.memory_used());
  };
+#if 0
+#endif
 };
 
 }  // namespace async
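
Migration note (illustrative sketch, not part of the patch): with async::scheduler and the strong_ptr dependency removed, consumers now derive from async::context, own the stack storage themselves, and override do_schedule; borrow_proxy() is replaced by async::proxy_context::from(parent). The minimal_context name, the 512-word stack size, and the answer() coroutine below are assumptions made for the example; context, initialize_stack_memory, do_schedule, blocked_by, block_info, future, and sync_wait come from this patch.

#include <array>

import async_context;

// Hypothetical minimal consumer context: owns its stack buffer and ignores
// every scheduling notification, similar to the removed noop_scheduler().
struct minimal_context : async::context
{
  // Assumed element type async::uptr (the type initialize_stack_memory spans
  // over) and an arbitrary 512-word capacity.
  std::array<async::uptr, 512> m_stack{};

  minimal_context()
  {
    // Derived contexts must hand their storage to the base in the constructor.
    this->initialize_stack_memory(m_stack);
  }

private:
  void do_schedule(async::blocked_by, async::block_info) noexcept override
  {
    // A real scheduler would enqueue or park this context based on the
    // blocked_by state; a single-threaded busy-wait loop can ignore it.
  }
};

// Hypothetical coroutine whose frame is allocated from the context's stack.
async::future<int> answer(async::context&)
{
  co_return 42;
}

int main()
{
  minimal_context ctx;
  auto f = answer(ctx);
  return f.sync_wait() == 42 ? 0 : 1;
}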